2026-04-15T14:08:07.694 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-04-15T14:08:07.701 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-04-15T14:08:07.727 INFO:teuthology.run:Config:
archive_path: /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5369
branch: wip-sse-s3-on-v20.2.0
description: orch:cephadm:smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '5369'
last_in_suite: false
machine_type: vps
name: supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: wip-sse-s3-on-v20.2.0
  ansible.cephlab:
    branch: main
    repo: https://github.com/kshtsk/ceph-cm-ansible.git
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      logical_volumes:
        lv_1:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_2:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_3:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_4:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
      timezone: UTC
      volume_groups:
        vg_nvme:
          pvs: /dev/vdb,/dev/vdc,/dev/vdd,/dev/vde
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: 187293b0588135c3607a12257332b6880af4eff9
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  cephadm:
    cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
    containers:
      image: harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5
  install:
    ceph:
      flavor: default
      sha1: 187293b0588135c3607a12257332b6880af4eff9
    extra_system_packages:
      deb:
      - python3-jmespath
      - python3-xmltodict
      - s3cmd
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-jmespath
      - python3-xmltodict
      - s3cmd
    repos:
    - name: ceph-source
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-19-g7ec4401a095/el9.clyso/SRPMS
    - name: ceph-noarch
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-19-g7ec4401a095/el9.clyso/noarch
    - name: ceph
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-19-g7ec4401a095/el9.clyso/x86_64
  s3tests:
    sha1: e0c4ff71baef6d5126a0201df5fe54196d89b296
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
    - scontext=system_u:system_r:getty_t:s0
  workunit:
    branch: tt-wip-sse-s3-on-v20.2.0
    sha1: d26583cfb673e959af010b749fed6e7dba141caf
owner: supriti
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 9500
sha1: 187293b0588135c3607a12257332b6880af4eff9
sleep_before_teardown: 0
suite: orch:cephadm:smoke-roleless
suite_branch: tt-wip-sse-s3-on-v20.2.0
suite_path: /home/teuthos/src/git.local_ceph_d26583cfb673e959af010b749fed6e7dba141caf/qa
suite_relpath: qa
suite_repo: http://git.local/ceph.git
suite_sha1: d26583cfb673e959af010b749fed6e7dba141caf
targets:
  vm04.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCinw2bThIIJMj6rAFztoKxvnjJMjSTzdkztOB2sXEUwvlhuABfiMKqEYIAUyAARfSwJ0zVpfYo5SGFS83/eOL4=
  vm05.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJWgswN3/w3Q03wCLhFtITyyIe/dsN3+fJKpEjJ7ZZwFWXTxX7kiAXPWCtLA4VCwWUQ7mGvXe3OAmOmqP9sBJr4=
tasks:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install nvmetcli nvme-cli -y
- nvme_loop: null
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip: null
- cephadm.shell:
    host.a:
    - ceph orch device ls --refresh
- cephadm.apply:
    specs:
    - placement:
        count: 4
        host_pattern: '*'
      service_id: foo
      service_type: rgw
      spec:
        rgw_frontend_port: 8000
    - placement:
        count: 2
      service_id: rgw.foo
      service_type: ingress
      spec:
        backend_service: rgw.foo
        frontend_port: 9000
        monitor_port: 9001
        virtual_ip: '{{VIP0}}/{{VIPPREFIXLEN}}'
- cephadm.wait_for_service:
    service: rgw.foo
- cephadm.wait_for_service:
    service: ingress.rgw.foo
- cephadm.shell:
    host.a:
    - |
      echo "Check while healthy..."
      curl http://{{VIP0}}:9000/

      # stop each rgw in turn
      echo "Check with each rgw stopped in turn..."
      for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $rgw
        timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo 'Waiting for $rgw to stop'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
        timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
        ceph orch daemon start $rgw
        timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo 'Waiting for $rgw to start'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
      done

      # stop each haproxy in turn
      echo "Check with each haproxy down in turn..."
      for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $haproxy
        timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep stopped; do echo 'Waiting for $haproxy to stop'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
        timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
        ceph orch daemon start $haproxy
        timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo 'Waiting for $haproxy to start'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
      done

      timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/kshtsk/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-04-15_10:39:10
tube: vps
user: supriti
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.3072398
2026-04-15T14:08:07.727 INFO:teuthology.run:suite_path is set to /home/teuthos/src/git.local_ceph_d26583cfb673e959af010b749fed6e7dba141caf/qa; will attempt to use it
2026-04-15T14:08:07.727 INFO:teuthology.run:Found tasks at /home/teuthos/src/git.local_ceph_d26583cfb673e959af010b749fed6e7dba141caf/qa/tasks
2026-04-15T14:08:07.727 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-04-15T14:08:07.728 INFO:teuthology.task.internal:Saving configuration
2026-04-15T14:08:07.734 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-04-15T14:08:07.735 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-04-15T14:08:07.742 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm04.local', 'description': '/archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5369', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-04-15 14:06:47.922540', 'locked_by': 'supriti', 'mac_address': '52:55:00:00:00:04', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCinw2bThIIJMj6rAFztoKxvnjJMjSTzdkztOB2sXEUwvlhuABfiMKqEYIAUyAARfSwJ0zVpfYo5SGFS83/eOL4='}
2026-04-15T14:08:07.747 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm05.local', 'description': '/archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5369', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-04-15 14:06:47.922112', 'locked_by': 'supriti', 'mac_address': '52:55:00:00:00:05', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJWgswN3/w3Q03wCLhFtITyyIe/dsN3+fJKpEjJ7ZZwFWXTxX7kiAXPWCtLA4VCwWUQ7mGvXe3OAmOmqP9sBJr4='}
2026-04-15T14:08:07.747 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-04-15T14:08:07.748 INFO:teuthology.task.internal:roles: ubuntu@vm04.local - ['host.a', 'client.0']
2026-04-15T14:08:07.748 INFO:teuthology.task.internal:roles: ubuntu@vm05.local - ['host.b', 'client.1']
2026-04-15T14:08:07.748 INFO:teuthology.run_tasks:Running task console_log...
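(Aside: the ingress check script in the task list above polls daemon state by grepping `ceph orch ps` text output. A tighter variant of that polling idiom is sketched below; it is not part of the job, and `jq` on the host plus the `daemon_name`/`status_desc` JSON field names are assumptions for illustration.)

    wait_for_state() {
        # Poll `ceph orch ps` until daemon $1 reports status text $2, or give up.
        local daemon=$1 want=$2 i
        for i in $(seq 60); do
            if ceph orch ps --format json \
                | jq -e --arg d "$daemon" --arg s "$want" \
                    'any(.[]; .daemon_name == $d and .status_desc == $s)' >/dev/null; then
                return 0
            fi
            sleep 5    # same cadence as the job's `sleep 5` loops
        done
        return 1
    }
    # e.g.: wait_for_state rgw.foo.vm04.xyzabc stopped   (daemon name hypothetical)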
2026-04-15T14:08:07.763 DEBUG:teuthology.task.console_log:vm04 does not support IPMI; excluding
2026-04-15T14:08:07.768 DEBUG:teuthology.task.console_log:vm05 does not support IPMI; excluding
2026-04-15T14:08:07.768 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7f7b1e18bc70>, signals=[15])
2026-04-15T14:08:07.768 INFO:teuthology.run_tasks:Running task internal.connect...
2026-04-15T14:08:07.769 INFO:teuthology.task.internal:Opening connections...
2026-04-15T14:08:07.769 DEBUG:teuthology.task.internal:connecting to ubuntu@vm04.local
2026-04-15T14:08:07.770 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-15T14:08:07.830 DEBUG:teuthology.task.internal:connecting to ubuntu@vm05.local
2026-04-15T14:08:07.831 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm05.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-15T14:08:07.893 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-04-15T14:08:07.894 DEBUG:teuthology.orchestra.run.vm04:> uname -m
2026-04-15T14:08:07.935 INFO:teuthology.orchestra.run.vm04.stdout:x86_64
2026-04-15T14:08:07.935 DEBUG:teuthology.orchestra.run.vm04:> cat /etc/os-release
2026-04-15T14:08:07.991 INFO:teuthology.orchestra.run.vm04.stdout:NAME="CentOS Stream"
2026-04-15T14:08:07.991 INFO:teuthology.orchestra.run.vm04.stdout:VERSION="9"
2026-04-15T14:08:07.991 INFO:teuthology.orchestra.run.vm04.stdout:ID="centos"
2026-04-15T14:08:07.991 INFO:teuthology.orchestra.run.vm04.stdout:ID_LIKE="rhel fedora"
2026-04-15T14:08:07.992 INFO:teuthology.orchestra.run.vm04.stdout:VERSION_ID="9"
2026-04-15T14:08:07.992 INFO:teuthology.orchestra.run.vm04.stdout:PLATFORM_ID="platform:el9"
2026-04-15T14:08:07.992 INFO:teuthology.orchestra.run.vm04.stdout:PRETTY_NAME="CentOS Stream 9"
2026-04-15T14:08:07.992 INFO:teuthology.orchestra.run.vm04.stdout:ANSI_COLOR="0;31"
2026-04-15T14:08:07.992 INFO:teuthology.orchestra.run.vm04.stdout:LOGO="fedora-logo-icon"
2026-04-15T14:08:07.992 INFO:teuthology.orchestra.run.vm04.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-04-15T14:08:07.992 INFO:teuthology.orchestra.run.vm04.stdout:HOME_URL="https://centos.org/"
2026-04-15T14:08:07.992 INFO:teuthology.orchestra.run.vm04.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-04-15T14:08:07.992 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-04-15T14:08:07.992 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-04-15T14:08:07.992 INFO:teuthology.lock.ops:Updating vm04.local on lock server
2026-04-15T14:08:07.998 DEBUG:teuthology.orchestra.run.vm05:> uname -m
2026-04-15T14:08:08.020 INFO:teuthology.orchestra.run.vm05.stdout:x86_64
2026-04-15T14:08:08.020 DEBUG:teuthology.orchestra.run.vm05:> cat /etc/os-release
2026-04-15T14:08:08.077 INFO:teuthology.orchestra.run.vm05.stdout:NAME="CentOS Stream"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:VERSION="9"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:ID="centos"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:ID_LIKE="rhel fedora"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:VERSION_ID="9"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:PLATFORM_ID="platform:el9"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:PRETTY_NAME="CentOS Stream 9"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:ANSI_COLOR="0;31"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:LOGO="fedora-logo-icon"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:HOME_URL="https://centos.org/"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-04-15T14:08:08.078 INFO:teuthology.orchestra.run.vm05.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-04-15T14:08:08.078 INFO:teuthology.lock.ops:Updating vm05.local on lock server
2026-04-15T14:08:08.083 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-04-15T14:08:08.085 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-04-15T14:08:08.085 INFO:teuthology.task.internal:Checking for old test directory...
2026-04-15T14:08:08.085 DEBUG:teuthology.orchestra.run.vm04:> test '!' -e /home/ubuntu/cephtest
2026-04-15T14:08:08.087 DEBUG:teuthology.orchestra.run.vm05:> test '!' -e /home/ubuntu/cephtest
2026-04-15T14:08:08.135 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-04-15T14:08:08.137 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-04-15T14:08:08.137 DEBUG:teuthology.orchestra.run.vm04:> test -z $(ls -A /var/lib/ceph)
2026-04-15T14:08:08.143 DEBUG:teuthology.orchestra.run.vm05:> test -z $(ls -A /var/lib/ceph)
2026-04-15T14:08:08.157 INFO:teuthology.orchestra.run.vm04.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-04-15T14:08:08.193 INFO:teuthology.orchestra.run.vm05.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-04-15T14:08:08.193 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-04-15T14:08:08.201 DEBUG:teuthology.orchestra.run.vm04:> test -e /ceph-qa-ready
2026-04-15T14:08:08.217 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T14:08:08.412 DEBUG:teuthology.orchestra.run.vm05:> test -e /ceph-qa-ready
2026-04-15T14:08:08.427 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T14:08:08.620 INFO:teuthology.run_tasks:Running task internal.base...
2026-04-15T14:08:08.622 INFO:teuthology.task.internal:Creating test directory...
2026-04-15T14:08:08.622 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-04-15T14:08:08.624 DEBUG:teuthology.orchestra.run.vm05:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-04-15T14:08:08.641 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-04-15T14:08:08.642 INFO:teuthology.run_tasks:Running task internal.archive...
2026-04-15T14:08:08.643 INFO:teuthology.task.internal:Creating archive directory...
2026-04-15T14:08:08.643 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-04-15T14:08:08.680 DEBUG:teuthology.orchestra.run.vm05:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-04-15T14:08:08.699 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-04-15T14:08:08.701 INFO:teuthology.task.internal:Enabling coredump saving...
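(Aside: the emptiness probe above, `test -z $(ls -A /var/lib/ceph)`, leaves the command substitution unquoted, so a missing directory merely prints the `ls` error seen here and the test still passes on the empty expansion. A quote-safe sketch of the same check, illustrative only:)

    # Fail if /var/lib/ceph exists and is non-empty; stay quiet when it is absent.
    if [ -d /var/lib/ceph ] && [ -n "$(ls -A /var/lib/ceph 2>/dev/null)" ]; then
        echo "/var/lib/ceph is not empty" >&2
        exit 1
    fi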
2026-04-15T14:08:08.701 DEBUG:teuthology.orchestra.run.vm04:> test -f /run/.containerenv -o -f /.dockerenv
2026-04-15T14:08:08.751 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T14:08:08.751 DEBUG:teuthology.orchestra.run.vm05:> test -f /run/.containerenv -o -f /.dockerenv
2026-04-15T14:08:08.767 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T14:08:08.767 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-04-15T14:08:08.794 DEBUG:teuthology.orchestra.run.vm05:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-04-15T14:08:08.819 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-15T14:08:08.830 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-15T14:08:08.834 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-15T14:08:08.844 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-15T14:08:08.846 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-04-15T14:08:08.847 INFO:teuthology.task.internal:Configuring sudo...
2026-04-15T14:08:08.848 DEBUG:teuthology.orchestra.run.vm04:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-04-15T14:08:08.875 DEBUG:teuthology.orchestra.run.vm05:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-04-15T14:08:08.913 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-04-15T14:08:08.915 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
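(Aside: the coredump task above redirects kernel core dumps into the job's archive. A quick way to confirm the setting took hold on a node, using only paths the log shows:)

    # %t expands to the dump time (epoch seconds), %p to the PID of the crashing process.
    sysctl kernel.core_pattern
    # expected: kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
    ls -A /home/ubuntu/cephtest/archive/coredump   # cores collected so far, if any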
2026-04-15T14:08:08.915 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-04-15T14:08:08.945 DEBUG:teuthology.orchestra.run.vm05:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-04-15T14:08:08.969 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-04-15T14:08:09.025 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-04-15T14:08:09.085 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-04-15T14:08:09.085 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-04-15T14:08:09.146 DEBUG:teuthology.orchestra.run.vm05:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-04-15T14:08:09.170 DEBUG:teuthology.orchestra.run.vm05:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-04-15T14:08:09.228 DEBUG:teuthology.orchestra.run.vm05:> set -ex
2026-04-15T14:08:09.228 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-04-15T14:08:09.291 DEBUG:teuthology.orchestra.run.vm04:> sudo service rsyslog restart
2026-04-15T14:08:09.293 DEBUG:teuthology.orchestra.run.vm05:> sudo service rsyslog restart
2026-04-15T14:08:09.321 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-04-15T14:08:09.359 INFO:teuthology.orchestra.run.vm05.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-04-15T14:08:09.723 INFO:teuthology.run_tasks:Running task internal.timer...
2026-04-15T14:08:09.725 INFO:teuthology.task.internal:Starting timer...
2026-04-15T14:08:09.725 INFO:teuthology.run_tasks:Running task pcp...
2026-04-15T14:08:09.728 INFO:teuthology.run_tasks:Running task selinux...
2026-04-15T14:08:09.730 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']}
2026-04-15T14:08:09.730 INFO:teuthology.task.selinux:Excluding vm04: VMs are not yet supported
2026-04-15T14:08:09.730 INFO:teuthology.task.selinux:Excluding vm05: VMs are not yet supported
2026-04-15T14:08:09.730 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-04-15T14:08:09.730 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-04-15T14:08:09.730 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-04-15T14:08:09.730 INFO:teuthology.run_tasks:Running task ansible.cephlab...
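(Aside: the `sudo dd of=/etc/rsyslog.d/80-cephtest.conf` commands above write a drop-in whose payload is piped over stdin and not echoed to the log. A plausible shape for it, illustrative only and not the verbatim teuthology template, routes kernel messages to kern.log and everything else to misc.log:)

    printf '%s\n' \
        'kern.* -/home/ubuntu/cephtest/archive/syslog/kern.log;RSYSLOG_FileFormat' \
        '*.*;kern.none -/home/ubuntu/cephtest/archive/syslog/misc.log;RSYSLOG_FileFormat' \
        | sudo tee /etc/rsyslog.d/80-cephtest.conf >/dev/null
    sudo systemctl restart rsyslog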
2026-04-15T14:08:09.732 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'logical_volumes': {'lv_1': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_2': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_3': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_4': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}}, 'timezone': 'UTC', 'volume_groups': {'vg_nvme': {'pvs': '/dev/vdb,/dev/vdc,/dev/vdd,/dev/vde'}}}}
2026-04-15T14:08:09.732 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git
2026-04-15T14:08:09.733 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin
2026-04-15T14:08:10.341 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main
2026-04-15T14:08:10.347 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-04-15T14:08:10.347 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "logical_volumes": {"lv_1": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_2": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_3": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_4": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}}, "timezone": "UTC", "volume_groups": {"vg_nvme": {"pvs": "/dev/vdb,/dev/vdc,/dev/vdd,/dev/vde"}}}' -i /tmp/teuth_ansible_inventoryn4nb7kgb --limit vm04.local,vm05.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-04-15T14:10:14.619 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm04.local'), Remote(name='ubuntu@vm05.local')]
2026-04-15T14:10:14.619 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm04.local'
2026-04-15T14:10:14.620 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-15T14:10:14.680 DEBUG:teuthology.orchestra.run.vm04:> true
2026-04-15T14:10:14.762 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm04.local'
2026-04-15T14:10:14.762 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm05.local'
2026-04-15T14:10:14.763 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm05.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-15T14:10:14.824 DEBUG:teuthology.orchestra.run.vm05:> true
2026-04-15T14:10:14.897 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm05.local'
2026-04-15T14:10:14.897 INFO:teuthology.run_tasks:Running task clock...
2026-04-15T14:10:14.900 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
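(Aside: the stop/step/start chain issued next tolerates whichever of ntp, ntpd, or chronyd a node happens to run. On these CentOS 9 Stream guests only chronyd exists, so the ntp/ntpd branches produce the "not loaded"/"command not found" noise below, and the "506 Cannot talk to daemon" is `chronyc makestep` running while chronyd is still stopped. A chrony-only equivalent, sketched in working order:)

    sudo systemctl stop chronyd.service
    sudo systemctl start chronyd.service
    sudo chronyc makestep      # step the clock now; needs a running chronyd
    chronyc sources            # report peers, standing in for the absent `ntpq -p`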
2026-04-15T14:10:14.900 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-04-15T14:10:14.900 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-04-15T14:10:14.901 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-04-15T14:10:14.901 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-04-15T14:10:14.932 INFO:teuthology.orchestra.run.vm04.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-04-15T14:10:14.947 INFO:teuthology.orchestra.run.vm04.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-04-15T14:10:14.970 INFO:teuthology.orchestra.run.vm05.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-04-15T14:10:14.975 INFO:teuthology.orchestra.run.vm04.stderr:sudo: ntpd: command not found
2026-04-15T14:10:14.982 INFO:teuthology.orchestra.run.vm05.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-04-15T14:10:14.985 INFO:teuthology.orchestra.run.vm04.stdout:506 Cannot talk to daemon
2026-04-15T14:10:15.000 INFO:teuthology.orchestra.run.vm04.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-04-15T14:10:15.004 INFO:teuthology.orchestra.run.vm05.stderr:sudo: ntpd: command not found
2026-04-15T14:10:15.015 INFO:teuthology.orchestra.run.vm04.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-04-15T14:10:15.015 INFO:teuthology.orchestra.run.vm05.stdout:506 Cannot talk to daemon
2026-04-15T14:10:15.027 INFO:teuthology.orchestra.run.vm05.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-04-15T14:10:15.039 INFO:teuthology.orchestra.run.vm05.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-04-15T14:10:15.067 INFO:teuthology.orchestra.run.vm04.stderr:bash: line 1: ntpq: command not found
2026-04-15T14:10:15.071 INFO:teuthology.orchestra.run.vm04.stdout:MS Name/IP address         Stratum Poll Reach LastRx Last sample
2026-04-15T14:10:15.071 INFO:teuthology.orchestra.run.vm04.stdout:===============================================================================
2026-04-15T14:10:15.071 INFO:teuthology.orchestra.run.vm04.stdout:^? s7.vonderste.in               0   6     0     -     +0ns[   +0ns] +/-    0ns
2026-04-15T14:10:15.071 INFO:teuthology.orchestra.run.vm04.stdout:^? ns.gunnarhofmann.de           0   6     0     -     +0ns[   +0ns] +/-    0ns
2026-04-15T14:10:15.071 INFO:teuthology.orchestra.run.vm04.stdout:^? 85.215.227.11                 0   6     0     -     +0ns[   +0ns] +/-    0ns
2026-04-15T14:10:15.071 INFO:teuthology.orchestra.run.vm04.stdout:^? ntp2.kernfusion.at            0   6     0     -     +0ns[   +0ns] +/-    0ns
2026-04-15T14:10:15.090 INFO:teuthology.orchestra.run.vm05.stderr:bash: line 1: ntpq: command not found
2026-04-15T14:10:15.093 INFO:teuthology.orchestra.run.vm05.stdout:MS Name/IP address         Stratum Poll Reach LastRx Last sample
2026-04-15T14:10:15.093 INFO:teuthology.orchestra.run.vm05.stdout:===============================================================================
2026-04-15T14:10:15.093 INFO:teuthology.orchestra.run.vm05.stdout:^? ns.gunnarhofmann.de           0   6     0     -     +0ns[   +0ns] +/-    0ns
2026-04-15T14:10:15.093 INFO:teuthology.orchestra.run.vm05.stdout:^? s7.vonderste.in               0   6     0     -     +0ns[   +0ns] +/-    0ns
2026-04-15T14:10:15.093 INFO:teuthology.orchestra.run.vm05.stdout:^? 85.215.227.11                 0   6     0     -     +0ns[   +0ns] +/-    0ns
2026-04-15T14:10:15.093 INFO:teuthology.orchestra.run.vm05.stdout:^? ntp2.kernfusion.at            0   6     0     -     +0ns[   +0ns] +/-    0ns
2026-04-15T14:10:15.093 INFO:teuthology.run_tasks:Running task pexec...
2026-04-15T14:10:15.096 INFO:teuthology.task.pexec:Executing custom commands...
2026-04-15T14:10:15.096 DEBUG:teuthology.orchestra.run.vm04:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-04-15T14:10:15.097 DEBUG:teuthology.orchestra.run.vm05:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-04-15T14:10:15.113 DEBUG:teuthology.task.pexec:ubuntu@vm04.local< sudo dnf remove nvme-cli -y
2026-04-15T14:10:15.113 DEBUG:teuthology.task.pexec:ubuntu@vm04.local< sudo dnf install nvmetcli nvme-cli -y
2026-04-15T14:10:15.113 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm04.local
2026-04-15T14:10:15.113 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-04-15T14:10:15.113 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-04-15T14:10:15.135 DEBUG:teuthology.task.pexec:ubuntu@vm05.local< sudo dnf remove nvme-cli -y
2026-04-15T14:10:15.135 DEBUG:teuthology.task.pexec:ubuntu@vm05.local< sudo dnf install nvmetcli nvme-cli -y
2026-04-15T14:10:15.135 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm05.local
2026-04-15T14:10:15.135 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-04-15T14:10:15.135 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-04-15T14:10:15.298 INFO:teuthology.orchestra.run.vm04.stdout:No match for argument: nvme-cli
2026-04-15T14:10:15.299 INFO:teuthology.orchestra.run.vm04.stderr:No packages marked for removal.
2026-04-15T14:10:15.301 INFO:teuthology.orchestra.run.vm04.stdout:Dependencies resolved.
2026-04-15T14:10:15.302 INFO:teuthology.orchestra.run.vm04.stdout:Nothing to do.
2026-04-15T14:10:15.302 INFO:teuthology.orchestra.run.vm04.stdout:Complete!
2026-04-15T14:10:15.338 INFO:teuthology.orchestra.run.vm05.stdout:No match for argument: nvme-cli
2026-04-15T14:10:15.338 INFO:teuthology.orchestra.run.vm05.stderr:No packages marked for removal.
2026-04-15T14:10:15.340 INFO:teuthology.orchestra.run.vm05.stdout:Dependencies resolved.
2026-04-15T14:10:15.341 INFO:teuthology.orchestra.run.vm05.stdout:Nothing to do.
2026-04-15T14:10:15.341 INFO:teuthology.orchestra.run.vm05.stdout:Complete!
2026-04-15T14:10:15.680 INFO:teuthology.orchestra.run.vm04.stdout:Last metadata expiration check: 0:01:28 ago on Wed 15 Apr 2026 02:08:47 PM UTC.
2026-04-15T14:10:15.726 INFO:teuthology.orchestra.run.vm05.stdout:Last metadata expiration check: 0:01:33 ago on Wed 15 Apr 2026 02:08:42 PM UTC.
2026-04-15T14:10:15.781 INFO:teuthology.orchestra.run.vm04.stdout:Dependencies resolved.
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:================================================================================
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout: Package                  Architecture   Version              Repository   Size
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:================================================================================
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:Installing:
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout: nvme-cli                 x86_64         2.16-1.el9           baseos      1.2 M
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout: nvmetcli                 noarch         0.8-3.el9            baseos       44 k
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:Installing dependencies:
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout: python3-configshell      noarch         1:1.1.30-1.el9       baseos       72 k
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout: python3-kmod             x86_64         0.9-32.el9           baseos       84 k
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout: python3-pyparsing        noarch         2.4.7-9.el9          baseos      150 k
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout: python3-urwid            x86_64         2.1.2-4.el9          baseos      837 k
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:Transaction Summary
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:================================================================================
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:Install  6 Packages
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:Total download size: 2.3 M
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:Installed size: 11 M
2026-04-15T14:10:15.782 INFO:teuthology.orchestra.run.vm04.stdout:Downloading Packages:
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:Dependencies resolved.
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:================================================================================
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout: Package                  Architecture   Version              Repository   Size
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:================================================================================
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:Installing:
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout: nvme-cli                 x86_64         2.16-1.el9           baseos      1.2 M
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout: nvmetcli                 noarch         0.8-3.el9            baseos       44 k
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:Installing dependencies:
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout: python3-configshell      noarch         1:1.1.30-1.el9       baseos       72 k
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout: python3-kmod             x86_64         0.9-32.el9           baseos       84 k
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout: python3-pyparsing        noarch         2.4.7-9.el9          baseos      150 k
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout: python3-urwid            x86_64         2.1.2-4.el9          baseos      837 k
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:Transaction Summary
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:================================================================================
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:Install  6 Packages
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:Total download size: 2.3 M
2026-04-15T14:10:15.823 INFO:teuthology.orchestra.run.vm05.stdout:Installed size: 11 M
2026-04-15T14:10:15.824 INFO:teuthology.orchestra.run.vm05.stdout:Downloading Packages:
2026-04-15T14:10:16.015 INFO:teuthology.orchestra.run.vm05.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm            473 kB/s |  44 kB     00:00
2026-04-15T14:10:16.036 INFO:teuthology.orchestra.run.vm04.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm            312 kB/s |  44 kB     00:00
2026-04-15T14:10:16.044 INFO:teuthology.orchestra.run.vm04.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 484 kB/s |  72 kB     00:00
2026-04-15T14:10:16.074 INFO:teuthology.orchestra.run.vm05.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 474 kB/s |  72 kB     00:00
2026-04-15T14:10:16.078 INFO:teuthology.orchestra.run.vm05.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm       1.3 MB/s |  84 kB     00:00
2026-04-15T14:10:16.104 INFO:teuthology.orchestra.run.vm04.stdout:(3/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.5 MB/s | 150 kB     00:00
2026-04-15T14:10:16.107 INFO:teuthology.orchestra.run.vm04.stdout:(4/6): python3-kmod-0.9-32.el9.x86_64.rpm       1.2 MB/s |  84 kB     00:00
2026-04-15T14:10:16.114 INFO:teuthology.orchestra.run.vm04.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm           5.3 MB/s | 1.2 MB     00:00
2026-04-15T14:10:16.131 INFO:teuthology.orchestra.run.vm05.stdout:(4/6): nvme-cli-2.16-1.el9.x86_64.rpm           5.5 MB/s | 1.2 MB     00:00
2026-04-15T14:10:16.185 INFO:teuthology.orchestra.run.vm05.stdout:(5/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 1.3 MB/s | 150 kB     00:00
2026-04-15T14:10:16.202 INFO:teuthology.orchestra.run.vm04.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm     8.4 MB/s | 837 kB     00:00
2026-04-15T14:10:16.203 INFO:teuthology.orchestra.run.vm04.stdout:--------------------------------------------------------------------------------
2026-04-15T14:10:16.203 INFO:teuthology.orchestra.run.vm04.stdout:Total                                           5.5 MB/s | 2.3 MB     00:00
2026-04-15T14:10:16.221 INFO:teuthology.orchestra.run.vm05.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm     5.8 MB/s | 837 kB     00:00
2026-04-15T14:10:16.221 INFO:teuthology.orchestra.run.vm05.stdout:--------------------------------------------------------------------------------
2026-04-15T14:10:16.221 INFO:teuthology.orchestra.run.vm05.stdout:Total                                           5.8 MB/s | 2.3 MB     00:00
2026-04-15T14:10:16.278 INFO:teuthology.orchestra.run.vm04.stdout:Running transaction check
2026-04-15T14:10:16.285 INFO:teuthology.orchestra.run.vm04.stdout:Transaction check succeeded.
2026-04-15T14:10:16.286 INFO:teuthology.orchestra.run.vm04.stdout:Running transaction test
2026-04-15T14:10:16.300 INFO:teuthology.orchestra.run.vm05.stdout:Running transaction check
2026-04-15T14:10:16.309 INFO:teuthology.orchestra.run.vm05.stdout:Transaction check succeeded.
2026-04-15T14:10:16.309 INFO:teuthology.orchestra.run.vm05.stdout:Running transaction test
2026-04-15T14:10:16.336 INFO:teuthology.orchestra.run.vm04.stdout:Transaction test succeeded.
2026-04-15T14:10:16.337 INFO:teuthology.orchestra.run.vm04.stdout:Running transaction
2026-04-15T14:10:16.361 INFO:teuthology.orchestra.run.vm05.stdout:Transaction test succeeded.
2026-04-15T14:10:16.361 INFO:teuthology.orchestra.run.vm05.stdout:Running transaction
2026-04-15T14:10:16.485 INFO:teuthology.orchestra.run.vm04.stdout:  Preparing        :                                                        1/1
2026-04-15T14:10:16.496 INFO:teuthology.orchestra.run.vm04.stdout:  Installing       : python3-urwid-2.1.2-4.el9.x86_64                       1/6
2026-04-15T14:10:16.508 INFO:teuthology.orchestra.run.vm04.stdout:  Installing       : python3-pyparsing-2.4.7-9.el9.noarch                   2/6
2026-04-15T14:10:16.516 INFO:teuthology.orchestra.run.vm04.stdout:  Installing       : python3-configshell-1:1.1.30-1.el9.noarch              3/6
2026-04-15T14:10:16.521 INFO:teuthology.orchestra.run.vm05.stdout:  Preparing        :                                                        1/1
2026-04-15T14:10:16.524 INFO:teuthology.orchestra.run.vm04.stdout:  Installing       : python3-kmod-0.9-32.el9.x86_64                         4/6
2026-04-15T14:10:16.526 INFO:teuthology.orchestra.run.vm04.stdout:  Installing       : nvmetcli-0.8-3.el9.noarch                              5/6
2026-04-15T14:10:16.531 INFO:teuthology.orchestra.run.vm05.stdout:  Installing       : python3-urwid-2.1.2-4.el9.x86_64                       1/6
2026-04-15T14:10:16.542 INFO:teuthology.orchestra.run.vm05.stdout:  Installing       : python3-pyparsing-2.4.7-9.el9.noarch                   2/6
2026-04-15T14:10:16.549 INFO:teuthology.orchestra.run.vm05.stdout:  Installing       : python3-configshell-1:1.1.30-1.el9.noarch              3/6
2026-04-15T14:10:16.558 INFO:teuthology.orchestra.run.vm05.stdout:  Installing       : python3-kmod-0.9-32.el9.x86_64                         4/6
2026-04-15T14:10:16.560 INFO:teuthology.orchestra.run.vm05.stdout:  Installing       : nvmetcli-0.8-3.el9.noarch                              5/6
2026-04-15T14:10:16.678 INFO:teuthology.orchestra.run.vm04.stdout:  Running scriptlet: nvmetcli-0.8-3.el9.noarch                              5/6
2026-04-15T14:10:16.683 INFO:teuthology.orchestra.run.vm04.stdout:  Installing       : nvme-cli-2.16-1.el9.x86_64                             6/6
2026-04-15T14:10:16.714 INFO:teuthology.orchestra.run.vm05.stdout:  Running scriptlet: nvmetcli-0.8-3.el9.noarch                              5/6
2026-04-15T14:10:16.719 INFO:teuthology.orchestra.run.vm05.stdout:  Installing       : nvme-cli-2.16-1.el9.x86_64                             6/6
2026-04-15T14:10:17.015 INFO:teuthology.orchestra.run.vm04.stdout:  Running scriptlet: nvme-cli-2.16-1.el9.x86_64                             6/6
2026-04-15T14:10:17.015 INFO:teuthology.orchestra.run.vm04.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-04-15T14:10:17.015 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:10:17.047 INFO:teuthology.orchestra.run.vm05.stdout:  Running scriptlet: nvme-cli-2.16-1.el9.x86_64                             6/6
2026-04-15T14:10:17.047 INFO:teuthology.orchestra.run.vm05.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-04-15T14:10:17.047 INFO:teuthology.orchestra.run.vm05.stdout:
2026-04-15T14:10:17.432 INFO:teuthology.orchestra.run.vm04.stdout:  Verifying        : nvme-cli-2.16-1.el9.x86_64                             1/6
2026-04-15T14:10:17.433 INFO:teuthology.orchestra.run.vm04.stdout:  Verifying        : nvmetcli-0.8-3.el9.noarch                              2/6
2026-04-15T14:10:17.433 INFO:teuthology.orchestra.run.vm04.stdout:  Verifying        : python3-configshell-1:1.1.30-1.el9.noarch              3/6
2026-04-15T14:10:17.433 INFO:teuthology.orchestra.run.vm04.stdout:  Verifying        : python3-kmod-0.9-32.el9.x86_64                         4/6
2026-04-15T14:10:17.433 INFO:teuthology.orchestra.run.vm04.stdout:  Verifying        : python3-pyparsing-2.4.7-9.el9.noarch                   5/6
2026-04-15T14:10:17.438 INFO:teuthology.orchestra.run.vm05.stdout:  Verifying        : nvme-cli-2.16-1.el9.x86_64                             1/6
2026-04-15T14:10:17.438 INFO:teuthology.orchestra.run.vm05.stdout:  Verifying        : nvmetcli-0.8-3.el9.noarch                              2/6
2026-04-15T14:10:17.438 INFO:teuthology.orchestra.run.vm05.stdout:  Verifying        : python3-configshell-1:1.1.30-1.el9.noarch              3/6
2026-04-15T14:10:17.438 INFO:teuthology.orchestra.run.vm05.stdout:  Verifying        : python3-kmod-0.9-32.el9.x86_64                         4/6
2026-04-15T14:10:17.439 INFO:teuthology.orchestra.run.vm05.stdout:  Verifying        : python3-pyparsing-2.4.7-9.el9.noarch                   5/6
2026-04-15T14:10:17.528 INFO:teuthology.orchestra.run.vm05.stdout:  Verifying        : python3-urwid-2.1.2-4.el9.x86_64                       6/6
2026-04-15T14:10:17.529 INFO:teuthology.orchestra.run.vm05.stdout:
2026-04-15T14:10:17.529 INFO:teuthology.orchestra.run.vm05.stdout:Installed:
2026-04-15T14:10:17.529 INFO:teuthology.orchestra.run.vm05.stdout:  nvme-cli-2.16-1.el9.x86_64                 nvmetcli-0.8-3.el9.noarch
2026-04-15T14:10:17.529 INFO:teuthology.orchestra.run.vm05.stdout:  python3-configshell-1:1.1.30-1.el9.noarch  python3-kmod-0.9-32.el9.x86_64
2026-04-15T14:10:17.529 INFO:teuthology.orchestra.run.vm05.stdout:  python3-pyparsing-2.4.7-9.el9.noarch       python3-urwid-2.1.2-4.el9.x86_64
2026-04-15T14:10:17.529 INFO:teuthology.orchestra.run.vm05.stdout:
2026-04-15T14:10:17.529 INFO:teuthology.orchestra.run.vm05.stdout:Complete!
2026-04-15T14:10:17.534 INFO:teuthology.orchestra.run.vm04.stdout:  Verifying        : python3-urwid-2.1.2-4.el9.x86_64                       6/6
2026-04-15T14:10:17.534 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:10:17.534 INFO:teuthology.orchestra.run.vm04.stdout:Installed:
2026-04-15T14:10:17.534 INFO:teuthology.orchestra.run.vm04.stdout:  nvme-cli-2.16-1.el9.x86_64                 nvmetcli-0.8-3.el9.noarch
2026-04-15T14:10:17.534 INFO:teuthology.orchestra.run.vm04.stdout:  python3-configshell-1:1.1.30-1.el9.noarch  python3-kmod-0.9-32.el9.x86_64
2026-04-15T14:10:17.534 INFO:teuthology.orchestra.run.vm04.stdout:  python3-pyparsing-2.4.7-9.el9.noarch       python3-urwid-2.1.2-4.el9.x86_64
2026-04-15T14:10:17.534 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:10:17.534 INFO:teuthology.orchestra.run.vm04.stdout:Complete!
2026-04-15T14:10:17.585 DEBUG:teuthology.parallel:result is None
2026-04-15T14:10:17.586 DEBUG:teuthology.parallel:result is None
2026-04-15T14:10:17.586 INFO:teuthology.run_tasks:Running task nvme_loop...
2026-04-15T14:10:17.589 INFO:tasks.nvme_loop:Setting up nvme_loop on scratch devices...
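(Aside: what the nvme_loop task does for each scratch LV, condensed from the commands that follow into one illustrative sketch. Everything mirrors the log; only the two helper function names are invented:)

    setup_nvmet_loop_port() {   # once per host
        grep -q '^nvme_loop' /proc/modules || sudo modprobe nvme_loop
        sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn
        sudo mkdir -p /sys/kernel/config/nvmet/ports/1
        echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype >/dev/null
    }

    expose_lv_as_nvme() {       # once per logical volume
        local lv=$1 sub
        sub=$(basename "$lv")
        # subsystem + namespace backed by the LV
        sudo mkdir -p /sys/kernel/config/nvmet/subsystems/$sub
        echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/$sub/attr_allow_any_host >/dev/null
        sudo mkdir -p /sys/kernel/config/nvmet/subsystems/$sub/namespaces/1
        echo -n "$lv" | sudo tee /sys/kernel/config/nvmet/subsystems/$sub/namespaces/1/device_path >/dev/null
        echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/$sub/namespaces/1/enable >/dev/null
        # publish on the loop port, then connect locally so a /dev/nvmeXn1 appears
        sudo ln -s /sys/kernel/config/nvmet/subsystems/$sub /sys/kernel/config/nvmet/ports/1/subsystems/$sub
        sudo nvme connect -t loop -n "$sub" -q hostnqn
    }

    # setup_nvmet_loop_port; for lv in /dev/vg_nvme/lv_{1..4}; do expose_lv_as_nvme "$lv"; done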
2026-04-15T14:10:17.589 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-04-15T14:10:17.589 DEBUG:teuthology.orchestra.run.vm04:> dd if=/scratch_devs of=/dev/stdout
2026-04-15T14:10:17.613 DEBUG:teuthology.misc:devs=['/dev/vg_nvme/lv_1', '/dev/vg_nvme/lv_2', '/dev/vg_nvme/lv_3', '/dev/vg_nvme/lv_4']
2026-04-15T14:10:17.614 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vg_nvme/lv_1
2026-04-15T14:10:17.679 INFO:teuthology.orchestra.run.vm04.stdout:  File: /dev/vg_nvme/lv_1 -> ../dm-0
2026-04-15T14:10:17.679 INFO:teuthology.orchestra.run.vm04.stdout:  Size: 7          Blocks: 0          IO Block: 4096   symbolic link
2026-04-15T14:10:17.679 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d      Inode: 961         Links: 1
2026-04-15T14:10:17.679 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0777/lrwxrwxrwx)  Uid: (    0/    root)   Gid: (    0/    root)
2026-04-15T14:10:17.679 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:device_t:s0
2026-04-15T14:10:17.679 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-15 14:10:17.294638552 +0000
2026-04-15T14:10:17.679 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-15 14:10:17.147638405 +0000
2026-04-15T14:10:17.679 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-15 14:10:17.147638405 +0000
2026-04-15T14:10:17.679 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-04-15 14:10:17.147638405 +0000
2026-04-15T14:10:17.679 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vg_nvme/lv_1 of=/dev/null count=1
2026-04-15T14:10:17.752 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in
2026-04-15T14:10:17.752 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out
2026-04-15T14:10:17.752 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000120355 s, 4.3 MB/s
2026-04-15T14:10:17.753 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_1
2026-04-15T14:10:17.813 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vg_nvme/lv_2
2026-04-15T14:10:17.872 INFO:teuthology.orchestra.run.vm04.stdout:  File: /dev/vg_nvme/lv_2 -> ../dm-1
2026-04-15T14:10:17.872 INFO:teuthology.orchestra.run.vm04.stdout:  Size: 7          Blocks: 0          IO Block: 4096   symbolic link
2026-04-15T14:10:17.872 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d      Inode: 955         Links: 1
2026-04-15T14:10:17.872 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0777/lrwxrwxrwx)  Uid: (    0/    root)   Gid: (    0/    root)
2026-04-15T14:10:17.872 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:device_t:s0
2026-04-15T14:10:17.872 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-15 14:10:17.294638552 +0000
2026-04-15T14:10:17.872 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-15 14:10:17.142638400 +0000
2026-04-15T14:10:17.872 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-15 14:10:17.142638400 +0000
2026-04-15T14:10:17.872 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-04-15 14:10:17.142638400 +0000
2026-04-15T14:10:17.872 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vg_nvme/lv_2 of=/dev/null count=1
2026-04-15T14:10:17.938 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in
2026-04-15T14:10:17.938 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out
2026-04-15T14:10:17.938 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000153347 s, 3.3 MB/s
2026-04-15T14:10:17.939 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_2
2026-04-15T14:10:18.002 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vg_nvme/lv_3
2026-04-15T14:10:18.064 INFO:teuthology.orchestra.run.vm04.stdout:  File: /dev/vg_nvme/lv_3 -> ../dm-2
2026-04-15T14:10:18.064 INFO:teuthology.orchestra.run.vm04.stdout:  Size: 7          Blocks: 0          IO Block: 4096   symbolic link
2026-04-15T14:10:18.064 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d      Inode: 971         Links: 1
2026-04-15T14:10:18.064 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0777/lrwxrwxrwx)  Uid: (    0/    root)   Gid: (    0/    root)
2026-04-15T14:10:18.064 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:device_t:s0
2026-04-15T14:10:18.064 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-15 14:10:17.294638552 +0000
2026-04-15T14:10:18.064 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-15 14:10:17.155638413 +0000
2026-04-15T14:10:18.064 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-15 14:10:17.155638413 +0000
2026-04-15T14:10:18.064 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-04-15 14:10:17.155638413 +0000
2026-04-15T14:10:18.064 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vg_nvme/lv_3 of=/dev/null count=1
2026-04-15T14:10:18.136 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in
2026-04-15T14:10:18.136 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out
2026-04-15T14:10:18.136 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000169798 s, 3.0 MB/s
2026-04-15T14:10:18.139 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_3
2026-04-15T14:10:18.200 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vg_nvme/lv_4
2026-04-15T14:10:18.262 INFO:teuthology.orchestra.run.vm04.stdout:  File: /dev/vg_nvme/lv_4 -> ../dm-3
2026-04-15T14:10:18.262 INFO:teuthology.orchestra.run.vm04.stdout:  Size: 7          Blocks: 0          IO Block: 4096   symbolic link
2026-04-15T14:10:18.262 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d      Inode: 950         Links: 1
2026-04-15T14:10:18.262 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0777/lrwxrwxrwx)  Uid: (    0/    root)   Gid: (    0/    root)
2026-04-15T14:10:18.262 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:device_t:s0
2026-04-15T14:10:18.262 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-15 14:10:17.295638553 +0000
2026-04-15T14:10:18.262 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-15 14:10:17.140638398 +0000
2026-04-15T14:10:18.262 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-15 14:10:17.140638398 +0000
2026-04-15T14:10:18.262 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-04-15 14:10:17.140638398 +0000
2026-04-15T14:10:18.262 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vg_nvme/lv_4 of=/dev/null count=1
2026-04-15T14:10:18.330 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in
2026-04-15T14:10:18.330 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out
2026-04-15T14:10:18.330 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000170489 s, 3.0 MB/s
2026-04-15T14:10:18.331 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_4
2026-04-15T14:10:18.391 DEBUG:teuthology.orchestra.run.vm04:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype
2026-04-15T14:10:18.525 INFO:teuthology.orchestra.run.vm04.stdout:loop
2026-04-15T14:10:18.527 INFO:tasks.nvme_loop:Connecting nvme_loop vm04:/dev/vg_nvme/lv_1...
2026-04-15T14:10:18.527 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn
2026-04-15T14:10:18.563 INFO:teuthology.orchestra.run.vm04.stdout:1
2026-04-15T14:10:18.591 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vg_nvme/lv_11
2026-04-15T14:10:18.618 INFO:teuthology.orchestra.run.vm04.stdout:connecting to device: nvme0
2026-04-15T14:10:18.619 INFO:tasks.nvme_loop:Connecting nvme_loop vm04:/dev/vg_nvme/lv_2...
2026-04-15T14:10:18.619 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1 && echo -n /dev/vg_nvme/lv_2 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_2 /sys/kernel/config/nvmet/ports/1/subsystems/lv_2 && sudo nvme connect -t loop -n lv_2 -q hostnqn
2026-04-15T14:10:18.695 INFO:teuthology.orchestra.run.vm04.stdout:1
2026-04-15T14:10:18.723 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vg_nvme/lv_21
2026-04-15T14:10:18.754 INFO:teuthology.orchestra.run.vm04.stdout:connecting to device: nvme1
2026-04-15T14:10:18.755 INFO:tasks.nvme_loop:Connecting nvme_loop vm04:/dev/vg_nvme/lv_3...
2026-04-15T14:10:18.756 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1 && echo -n /dev/vg_nvme/lv_3 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_3 /sys/kernel/config/nvmet/ports/1/subsystems/lv_3 && sudo nvme connect -t loop -n lv_3 -q hostnqn
2026-04-15T14:10:18.837 INFO:teuthology.orchestra.run.vm04.stdout:1
2026-04-15T14:10:18.869 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vg_nvme/lv_31
2026-04-15T14:10:18.896 INFO:teuthology.orchestra.run.vm04.stdout:connecting to device: nvme2
2026-04-15T14:10:18.899 INFO:tasks.nvme_loop:Connecting nvme_loop vm04:/dev/vg_nvme/lv_4...
2026-04-15T14:10:18.899 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1 && echo -n /dev/vg_nvme/lv_4 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_4 /sys/kernel/config/nvmet/ports/1/subsystems/lv_4 && sudo nvme connect -t loop -n lv_4 -q hostnqn
2026-04-15T14:10:18.986 INFO:teuthology.orchestra.run.vm04.stdout:1
2026-04-15T14:10:19.017 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vg_nvme/lv_41
2026-04-15T14:10:19.044 INFO:teuthology.orchestra.run.vm04.stdout:connecting to device: nvme3
2026-04-15T14:10:19.047 DEBUG:teuthology.orchestra.run.vm04:> lsblk
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:NAME           MAJ:MIN RM  SIZE RO TYPE MOUNTPOINTS
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:sr0             11:0    1  366K  0 rom
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:vda            252:0    0   40G  0 disk
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:└─vda1         252:1    0   40G  0 part /
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:vdb            252:16   0   20G  0 disk
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:└─vg_nvme-lv_1 253:0    0   20G  0 lvm
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:vdc            252:32   0   20G  0 disk
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:└─vg_nvme-lv_2 253:1    0   20G  0 lvm
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:vdd            252:48   0   20G  0 disk
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:└─vg_nvme-lv_3 253:2    0   20G  0 lvm
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:vde            252:64   0   20G  0 disk
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:└─vg_nvme-lv_4 253:3    0   20G  0 lvm
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:nvme0n1        259:0    0   20G  0 disk
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:nvme1n1        259:3    0   20G  0 disk
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:nvme2n1        259:5    0   20G  0 disk
2026-04-15T14:10:19.069 INFO:teuthology.orchestra.run.vm04.stdout:nvme3n1        259:7    0   20G  0 disk
2026-04-15T14:10:19.069 DEBUG:teuthology.orchestra.run.vm04:> sudo nvme list -o json
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:{
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:  "Devices":[
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:    {
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "NameSpace":1,
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "DevicePath":"/dev/nvme0n1",
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "GenericPath":"/dev/ng0n1",
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "Firmware":"5.14.0-6",
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "ModelNumber":"Linux",
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "SerialNumber":"6b7ad07c165929411c9d",
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "UsedBytes":21470642176,
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "MaximumLBA":41934848,
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "PhysicalSize":21470642176,
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "SectorSize":512
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:    },
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:    {
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "NameSpace":1,
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "DevicePath":"/dev/nvme1n1",
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "GenericPath":"/dev/ng1n1",
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "Firmware":"5.14.0-6",
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "ModelNumber":"Linux",
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "SerialNumber":"fff789e869d944ed0405",
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "UsedBytes":21470642176,
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "MaximumLBA":41934848,
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "PhysicalSize":21470642176,
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "SectorSize":512
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:    },
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:    {
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "NameSpace":1,
2026-04-15T14:10:19.138 INFO:teuthology.orchestra.run.vm04.stdout:      "DevicePath":"/dev/nvme2n1",
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "GenericPath":"/dev/ng2n1",
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "Firmware":"5.14.0-6",
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "ModelNumber":"Linux",
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "SerialNumber":"3369c177c0052290acf5",
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "UsedBytes":21470642176,
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "MaximumLBA":41934848,
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "PhysicalSize":21470642176,
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "SectorSize":512
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:    },
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:    {
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "NameSpace":1,
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "DevicePath":"/dev/nvme3n1",
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "GenericPath":"/dev/ng3n1",
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "Firmware":"5.14.0-6",
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "ModelNumber":"Linux",
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "SerialNumber":"6212e725753f54ffe728",
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "UsedBytes":21470642176,
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "MaximumLBA":41934848,
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "PhysicalSize":21470642176,
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:      "SectorSize":512
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:    }
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:  ]
2026-04-15T14:10:19.139 INFO:teuthology.orchestra.run.vm04.stdout:}
2026-04-15T14:10:19.139 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096
2026-04-15T14:10:19.210 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in
2026-04-15T14:10:19.211 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out
2026-04-15T14:10:19.211 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00369912 s, 1.1 MB/s
2026-04-15T14:10:19.211 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s0 /dev/nvme0n1
2026-04-15T14:10:19.281 INFO:teuthology.orchestra.run.vm04.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-15T14:10:19.281 INFO:teuthology.orchestra.run.vm04.stdout:00000010 00 00 00 00 00 00 |......|
2026-04-15T14:10:19.281 INFO:teuthology.orchestra.run.vm04.stdout:00000016
2026-04-15T14:10:19.282 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096
2026-04-15T14:10:19.353 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in
2026-04-15T14:10:19.353 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out
2026-04-15T14:10:19.353 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00386567 s, 1.1 MB/s
2026-04-15T14:10:19.354 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1
2026-04-15T14:10:19.418 INFO:teuthology.orchestra.run.vm04.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-15T14:10:19.418 INFO:teuthology.orchestra.run.vm04.stdout:40000010 00 00 00 00 00 00 |......|
2026-04-15T14:10:19.418 INFO:teuthology.orchestra.run.vm04.stdout:40000016
2026-04-15T14:10:19.419 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096
2026-04-15T14:10:19.491 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in
2026-04-15T14:10:19.491 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out
2026-04-15T14:10:19.491 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00391091 s, 1.0 MB/s
2026-04-15T14:10:19.493 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1
2026-04-15T14:10:19.559 INFO:teuthology.orchestra.run.vm04.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-15T14:10:19.559 INFO:teuthology.orchestra.run.vm04.stdout:280000010 00 00 00 00 00 00 |......|
2026-04-15T14:10:19.559 INFO:teuthology.orchestra.run.vm04.stdout:280000016
2026-04-15T14:10:19.560 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096
2026-04-15T14:10:19.631 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in
2026-04-15T14:10:19.631 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out
2026-04-15T14:10:19.631 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0046548 s, 880 kB/s
2026-04-15T14:10:19.632 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s0 /dev/nvme1n1
2026-04-15T14:10:19.700 INFO:teuthology.orchestra.run.vm04.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-15T14:10:19.700 INFO:teuthology.orchestra.run.vm04.stdout:00000010 00 00 00 00 00 00 |......|
2026-04-15T14:10:19.700 INFO:teuthology.orchestra.run.vm04.stdout:00000016
2026-04-15T14:10:19.701 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096
2026-04-15T14:10:19.776
INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-15T14:10:19.776 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-15T14:10:19.776 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00406901 s, 1.0 MB/s 2026-04-15T14:10:19.781 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-04-15T14:10:19.849 INFO:teuthology.orchestra.run.vm04.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:19.849 INFO:teuthology.orchestra.run.vm04.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:19.849 INFO:teuthology.orchestra.run.vm04.stdout:40000016 2026-04-15T14:10:19.854 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-04-15T14:10:19.926 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-15T14:10:19.926 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-15T14:10:19.926 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00375566 s, 1.1 MB/s 2026-04-15T14:10:19.931 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-04-15T14:10:20.007 INFO:teuthology.orchestra.run.vm04.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:20.007 INFO:teuthology.orchestra.run.vm04.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:20.007 INFO:teuthology.orchestra.run.vm04.stdout:280000016 2026-04-15T14:10:20.009 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-04-15T14:10:20.084 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-15T14:10:20.084 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-15T14:10:20.084 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00472797 s, 866 kB/s 2026-04-15T14:10:20.091 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-04-15T14:10:20.167 INFO:teuthology.orchestra.run.vm04.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:20.170 INFO:teuthology.orchestra.run.vm04.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:20.170 INFO:teuthology.orchestra.run.vm04.stdout:00000016 2026-04-15T14:10:20.171 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-04-15T14:10:20.248 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-15T14:10:20.249 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-15T14:10:20.249 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00601061 s, 681 kB/s 2026-04-15T14:10:20.252 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-04-15T14:10:20.324 INFO:teuthology.orchestra.run.vm04.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:20.324 INFO:teuthology.orchestra.run.vm04.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:20.324 INFO:teuthology.orchestra.run.vm04.stdout:40000016 2026-04-15T14:10:20.325 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-04-15T14:10:20.395 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-15T14:10:20.395 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 
2026-04-15T14:10:20.395 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00389588 s, 1.1 MB/s 2026-04-15T14:10:20.396 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1 2026-04-15T14:10:20.465 INFO:teuthology.orchestra.run.vm04.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:20.465 INFO:teuthology.orchestra.run.vm04.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:20.465 INFO:teuthology.orchestra.run.vm04.stdout:280000016 2026-04-15T14:10:20.466 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096 2026-04-15T14:10:20.538 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-15T14:10:20.538 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-15T14:10:20.538 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00361205 s, 1.1 MB/s 2026-04-15T14:10:20.540 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s0 /dev/nvme3n1 2026-04-15T14:10:20.611 INFO:teuthology.orchestra.run.vm04.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:20.611 INFO:teuthology.orchestra.run.vm04.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:20.611 INFO:teuthology.orchestra.run.vm04.stdout:00000016 2026-04-15T14:10:20.612 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096 2026-04-15T14:10:20.686 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-15T14:10:20.686 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-15T14:10:20.686 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00405297 s, 1.0 MB/s 2026-04-15T14:10:20.690 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1 2026-04-15T14:10:20.758 INFO:teuthology.orchestra.run.vm04.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:20.758 INFO:teuthology.orchestra.run.vm04.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:20.758 INFO:teuthology.orchestra.run.vm04.stdout:40000016 2026-04-15T14:10:20.759 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096 2026-04-15T14:10:20.833 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-15T14:10:20.833 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-15T14:10:20.833 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00485402 s, 844 kB/s 2026-04-15T14:10:20.834 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1 2026-04-15T14:10:20.903 INFO:teuthology.orchestra.run.vm04.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:20.903 INFO:teuthology.orchestra.run.vm04.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:20.903 INFO:teuthology.orchestra.run.vm04.stdout:280000016 2026-04-15T14:10:20.905 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-15T14:10:20.905 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-15T14:10:20.905 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/scratch_devs 2026-04-15T14:10:20.972 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-04-15T14:10:20.973 DEBUG:teuthology.orchestra.run.vm05:> dd if=/scratch_devs of=/dev/stdout 
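Each attached namespace then gets a spot wipe-and-verify: 4 KiB of zeroes written at offsets 0, 1 GiB and 10 GiB, each followed by a 22-byte hexdump at the same offset that should read back all zeroes. The whole check condenses to a loop over the devices and offsets used above:

    # Zero 4 KiB at a few offsets and confirm the device reads back zeroes.
    for dev in /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1; do
        for off in 0 1073741824 10737418240; do   # 0, 1 GiB, 10 GiB
            sudo dd if=/dev/zero of="$dev" seek="$off" bs=1 count=4096
            sudo hexdump -n22 -C -s"$off" "$dev"  # expect all-zero output
        done
    done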
2026-04-15T14:10:20.992 DEBUG:teuthology.misc:devs=['/dev/vg_nvme/lv_1', '/dev/vg_nvme/lv_2', '/dev/vg_nvme/lv_3', '/dev/vg_nvme/lv_4'] 2026-04-15T14:10:20.992 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vg_nvme/lv_1 2026-04-15T14:10:21.053 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vg_nvme/lv_1 -> ../dm-0 2026-04-15T14:10:21.053 INFO:teuthology.orchestra.run.vm05.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-15T14:10:21.053 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 980 Links: 1 2026-04-15T14:10:21.053 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-15T14:10:21.053 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:device_t:s0 2026-04-15T14:10:21.053 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-04-15 14:10:17.308202646 +0000 2026-04-15T14:10:21.053 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-04-15 14:10:17.147202472 +0000 2026-04-15T14:10:21.053 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-04-15 14:10:17.147202472 +0000 2026-04-15T14:10:21.053 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-04-15 14:10:17.147202472 +0000 2026-04-15T14:10:21.054 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vg_nvme/lv_1 of=/dev/null count=1 2026-04-15T14:10:21.126 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-04-15T14:10:21.126 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-04-15T14:10:21.126 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.00017594 s, 2.9 MB/s 2026-04-15T14:10:21.127 DEBUG:teuthology.orchestra.run.vm05:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_1 2026-04-15T14:10:21.189 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vg_nvme/lv_2 2026-04-15T14:10:21.251 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vg_nvme/lv_2 -> ../dm-1 2026-04-15T14:10:21.251 INFO:teuthology.orchestra.run.vm05.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-15T14:10:21.251 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 971 Links: 1 2026-04-15T14:10:21.251 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-15T14:10:21.251 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:device_t:s0 2026-04-15T14:10:21.251 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-04-15 14:10:17.309202647 +0000 2026-04-15T14:10:21.251 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-04-15 14:10:17.142202467 +0000 2026-04-15T14:10:21.251 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-04-15 14:10:17.142202467 +0000 2026-04-15T14:10:21.251 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-04-15 14:10:17.142202467 +0000 2026-04-15T14:10:21.251 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vg_nvme/lv_2 of=/dev/null count=1 2026-04-15T14:10:21.316 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-04-15T14:10:21.316 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-04-15T14:10:21.316 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000195046 s, 2.6 MB/s 2026-04-15T14:10:21.318 DEBUG:teuthology.orchestra.run.vm05:> ! 
mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_2 2026-04-15T14:10:21.374 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vg_nvme/lv_3 2026-04-15T14:10:21.431 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vg_nvme/lv_3 -> ../dm-2 2026-04-15T14:10:21.431 INFO:teuthology.orchestra.run.vm05.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-15T14:10:21.431 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 968 Links: 1 2026-04-15T14:10:21.431 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-15T14:10:21.431 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:device_t:s0 2026-04-15T14:10:21.431 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-04-15 14:10:17.309202647 +0000 2026-04-15T14:10:21.431 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-04-15 14:10:17.141202466 +0000 2026-04-15T14:10:21.431 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-04-15 14:10:17.141202466 +0000 2026-04-15T14:10:21.431 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-04-15 14:10:17.141202466 +0000 2026-04-15T14:10:21.431 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vg_nvme/lv_3 of=/dev/null count=1 2026-04-15T14:10:21.495 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-04-15T14:10:21.495 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-04-15T14:10:21.496 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000164288 s, 3.1 MB/s 2026-04-15T14:10:21.497 DEBUG:teuthology.orchestra.run.vm05:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_3 2026-04-15T14:10:21.554 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vg_nvme/lv_4 2026-04-15T14:10:21.609 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vg_nvme/lv_4 -> ../dm-3 2026-04-15T14:10:21.609 INFO:teuthology.orchestra.run.vm05.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-15T14:10:21.609 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 962 Links: 1 2026-04-15T14:10:21.609 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-15T14:10:21.609 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:device_t:s0 2026-04-15T14:10:21.609 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-04-15 14:10:17.310202648 +0000 2026-04-15T14:10:21.609 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-04-15 14:10:17.140202465 +0000 2026-04-15T14:10:21.609 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-04-15 14:10:17.140202465 +0000 2026-04-15T14:10:21.609 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-04-15 14:10:17.140202465 +0000 2026-04-15T14:10:21.610 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vg_nvme/lv_4 of=/dev/null count=1 2026-04-15T14:10:21.678 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-04-15T14:10:21.678 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-04-15T14:10:21.678 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000213308 s, 2.4 MB/s 2026-04-15T14:10:21.679 DEBUG:teuthology.orchestra.run.vm05:> ! 
mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_4 2026-04-15T14:10:21.740 DEBUG:teuthology.orchestra.run.vm05:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype 2026-04-15T14:10:21.882 INFO:teuthology.orchestra.run.vm05.stdout:loop 2026-04-15T14:10:21.884 INFO:tasks.nvme_loop:Connecting nvme_loop vm05:/dev/vg_nvme/lv_1... 2026-04-15T14:10:21.884 DEBUG:teuthology.orchestra.run.vm05:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn 2026-04-15T14:10:21.925 INFO:teuthology.orchestra.run.vm05.stdout:1 2026-04-15T14:10:21.963 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vg_nvme/lv_11 2026-04-15T14:10:22.001 INFO:teuthology.orchestra.run.vm05.stdout:connecting to device: nvme0 2026-04-15T14:10:22.006 INFO:tasks.nvme_loop:Connecting nvme_loop vm05:/dev/vg_nvme/lv_2... 2026-04-15T14:10:22.006 DEBUG:teuthology.orchestra.run.vm05:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1 && echo -n /dev/vg_nvme/lv_2 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_2 /sys/kernel/config/nvmet/ports/1/subsystems/lv_2 && sudo nvme connect -t loop -n lv_2 -q hostnqn 2026-04-15T14:10:22.091 INFO:teuthology.orchestra.run.vm05.stdout:1 2026-04-15T14:10:22.128 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vg_nvme/lv_21 2026-04-15T14:10:22.162 INFO:teuthology.orchestra.run.vm05.stdout:connecting to device: nvme1 2026-04-15T14:10:22.167 INFO:tasks.nvme_loop:Connecting nvme_loop vm05:/dev/vg_nvme/lv_3... 2026-04-15T14:10:22.167 DEBUG:teuthology.orchestra.run.vm05:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1 && echo -n /dev/vg_nvme/lv_3 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_3 /sys/kernel/config/nvmet/ports/1/subsystems/lv_3 && sudo nvme connect -t loop -n lv_3 -q hostnqn 2026-04-15T14:10:22.213 INFO:teuthology.orchestra.run.vm05.stdout:1 2026-04-15T14:10:22.252 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vg_nvme/lv_31 2026-04-15T14:10:22.288 INFO:teuthology.orchestra.run.vm05.stdout:connecting to device: nvme2 2026-04-15T14:10:22.289 INFO:tasks.nvme_loop:Connecting nvme_loop vm05:/dev/vg_nvme/lv_4... 
2026-04-15T14:10:22.289 DEBUG:teuthology.orchestra.run.vm05:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1 && echo -n /dev/vg_nvme/lv_4 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_4 /sys/kernel/config/nvmet/ports/1/subsystems/lv_4 && sudo nvme connect -t loop -n lv_4 -q hostnqn 2026-04-15T14:10:22.331 INFO:teuthology.orchestra.run.vm05.stdout:1 2026-04-15T14:10:22.374 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vg_nvme/lv_41 2026-04-15T14:10:22.406 INFO:teuthology.orchestra.run.vm05.stdout:connecting to device: nvme3 2026-04-15T14:10:22.408 DEBUG:teuthology.orchestra.run.vm05:> lsblk 2026-04-15T14:10:22.472 INFO:teuthology.orchestra.run.vm05.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:sr0 11:0 1 366K 0 rom 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:vda 252:0 0 40G 0 disk 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:└─vda1 252:1 0 40G 0 part / 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:vdb 252:16 0 20G 0 disk 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:└─vg_nvme-lv_1 253:0 0 20G 0 lvm 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:vdc 252:32 0 20G 0 disk 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:└─vg_nvme-lv_2 253:1 0 20G 0 lvm 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:vdd 252:48 0 20G 0 disk 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:└─vg_nvme-lv_3 253:2 0 20G 0 lvm 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:vde 252:64 0 20G 0 disk 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:└─vg_nvme-lv_4 253:3 0 20G 0 lvm 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:nvme0n1 259:1 0 20G 0 disk 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:nvme1n1 259:2 0 20G 0 disk 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:nvme2n1 259:4 0 20G 0 disk 2026-04-15T14:10:22.473 INFO:teuthology.orchestra.run.vm05.stdout:nvme3n1 259:6 0 20G 0 disk 2026-04-15T14:10:22.473 DEBUG:teuthology.orchestra.run.vm05:> sudo nvme list -o json 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "Devices":[ 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: { 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "NameSpace":1, 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "DevicePath":"/dev/nvme0n1", 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "GenericPath":"/dev/ng0n1", 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "Firmware":"5.14.0-6", 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "ModelNumber":"Linux", 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "SerialNumber":"aa69ffdfa7408bf7867c", 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "UsedBytes":21470642176, 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "MaximumLBA":41934848, 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: 
"PhysicalSize":21470642176, 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "SectorSize":512 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: { 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "NameSpace":1, 2026-04-15T14:10:22.541 INFO:teuthology.orchestra.run.vm05.stdout: "DevicePath":"/dev/nvme1n1", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "GenericPath":"/dev/ng1n1", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "Firmware":"5.14.0-6", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "ModelNumber":"Linux", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "SerialNumber":"6884663c6eaa85c58c9d", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "UsedBytes":21470642176, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "MaximumLBA":41934848, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "PhysicalSize":21470642176, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "SectorSize":512 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: { 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "NameSpace":1, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "DevicePath":"/dev/nvme2n1", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "GenericPath":"/dev/ng2n1", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "Firmware":"5.14.0-6", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "ModelNumber":"Linux", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "SerialNumber":"01fde6453ce49045a1d5", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "UsedBytes":21470642176, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "MaximumLBA":41934848, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "PhysicalSize":21470642176, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "SectorSize":512 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: { 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "NameSpace":1, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "DevicePath":"/dev/nvme3n1", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "GenericPath":"/dev/ng3n1", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "Firmware":"5.14.0-6", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "ModelNumber":"Linux", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "SerialNumber":"0105428e5b971c67554c", 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "UsedBytes":21470642176, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "MaximumLBA":41934848, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "PhysicalSize":21470642176, 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: "SectorSize":512 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout: ] 2026-04-15T14:10:22.542 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-04-15T14:10:22.543 
DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096 2026-04-15T14:10:22.619 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:22.619 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:22.619 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00434412 s, 943 kB/s 2026-04-15T14:10:22.621 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s0 /dev/nvme0n1 2026-04-15T14:10:22.693 INFO:teuthology.orchestra.run.vm05.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:22.693 INFO:teuthology.orchestra.run.vm05.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:22.693 INFO:teuthology.orchestra.run.vm05.stdout:00000016 2026-04-15T14:10:22.695 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096 2026-04-15T14:10:22.767 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:22.767 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:22.767 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00412406 s, 993 kB/s 2026-04-15T14:10:22.768 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1 2026-04-15T14:10:22.841 INFO:teuthology.orchestra.run.vm05.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:22.841 INFO:teuthology.orchestra.run.vm05.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:22.841 INFO:teuthology.orchestra.run.vm05.stdout:40000016 2026-04-15T14:10:22.843 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096 2026-04-15T14:10:22.911 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:22.911 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:22.911 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00407286 s, 1.0 MB/s 2026-04-15T14:10:22.912 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1 2026-04-15T14:10:22.981 INFO:teuthology.orchestra.run.vm05.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:22.981 INFO:teuthology.orchestra.run.vm05.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:22.981 INFO:teuthology.orchestra.run.vm05.stdout:280000016 2026-04-15T14:10:22.983 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096 2026-04-15T14:10:23.055 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:23.055 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:23.055 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00364549 s, 1.1 MB/s 2026-04-15T14:10:23.056 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s0 /dev/nvme1n1 2026-04-15T14:10:23.123 INFO:teuthology.orchestra.run.vm05.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:23.123 INFO:teuthology.orchestra.run.vm05.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:23.123 INFO:teuthology.orchestra.run.vm05.stdout:00000016 2026-04-15T14:10:23.124 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096 2026-04-15T14:10:23.197 
INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:23.198 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:23.198 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00416265 s, 984 kB/s 2026-04-15T14:10:23.200 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-04-15T14:10:23.269 INFO:teuthology.orchestra.run.vm05.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:23.270 INFO:teuthology.orchestra.run.vm05.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:23.270 INFO:teuthology.orchestra.run.vm05.stdout:40000016 2026-04-15T14:10:23.271 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-04-15T14:10:23.340 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:23.340 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:23.340 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00365914 s, 1.1 MB/s 2026-04-15T14:10:23.346 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-04-15T14:10:23.415 INFO:teuthology.orchestra.run.vm05.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:23.415 INFO:teuthology.orchestra.run.vm05.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:23.415 INFO:teuthology.orchestra.run.vm05.stdout:280000016 2026-04-15T14:10:23.415 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-04-15T14:10:23.489 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:23.489 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:23.489 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00374265 s, 1.1 MB/s 2026-04-15T14:10:23.491 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-04-15T14:10:23.558 INFO:teuthology.orchestra.run.vm05.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:23.558 INFO:teuthology.orchestra.run.vm05.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:23.558 INFO:teuthology.orchestra.run.vm05.stdout:00000016 2026-04-15T14:10:23.559 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-04-15T14:10:23.625 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:23.626 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:23.626 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00382126 s, 1.1 MB/s 2026-04-15T14:10:23.627 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-04-15T14:10:23.694 INFO:teuthology.orchestra.run.vm05.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:23.694 INFO:teuthology.orchestra.run.vm05.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:23.694 INFO:teuthology.orchestra.run.vm05.stdout:40000016 2026-04-15T14:10:23.695 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-04-15T14:10:23.765 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:23.765 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 
2026-04-15T14:10:23.765 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00391665 s, 1.0 MB/s 2026-04-15T14:10:23.766 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1 2026-04-15T14:10:23.832 INFO:teuthology.orchestra.run.vm05.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:23.832 INFO:teuthology.orchestra.run.vm05.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:23.832 INFO:teuthology.orchestra.run.vm05.stdout:280000016 2026-04-15T14:10:23.834 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096 2026-04-15T14:10:23.910 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:23.910 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:23.910 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00496286 s, 825 kB/s 2026-04-15T14:10:23.912 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s0 /dev/nvme3n1 2026-04-15T14:10:23.980 INFO:teuthology.orchestra.run.vm05.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:23.980 INFO:teuthology.orchestra.run.vm05.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:23.980 INFO:teuthology.orchestra.run.vm05.stdout:00000016 2026-04-15T14:10:23.982 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096 2026-04-15T14:10:24.051 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:24.051 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:24.052 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00367585 s, 1.1 MB/s 2026-04-15T14:10:24.053 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1 2026-04-15T14:10:24.116 INFO:teuthology.orchestra.run.vm05.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:24.116 INFO:teuthology.orchestra.run.vm05.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:24.116 INFO:teuthology.orchestra.run.vm05.stdout:40000016 2026-04-15T14:10:24.117 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096 2026-04-15T14:10:24.184 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records in 2026-04-15T14:10:24.185 INFO:teuthology.orchestra.run.vm05.stderr:4096+0 records out 2026-04-15T14:10:24.185 INFO:teuthology.orchestra.run.vm05.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00412648 s, 993 kB/s 2026-04-15T14:10:24.185 DEBUG:teuthology.orchestra.run.vm05:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1 2026-04-15T14:10:24.249 INFO:teuthology.orchestra.run.vm05.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T14:10:24.249 INFO:teuthology.orchestra.run.vm05.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T14:10:24.249 INFO:teuthology.orchestra.run.vm05.stdout:280000016 2026-04-15T14:10:24.251 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-15T14:10:24.251 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-04-15T14:10:24.251 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/scratch_devs 2026-04-15T14:10:24.319 INFO:teuthology.run_tasks:Running task cephadm... 
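The new_devs list the task logs on each host is the set of namespace block devices that appeared after the connects; it can be read straight out of the `nvme list -o json` output shown earlier. A sketch, assuming jq is available on the node (it is not among the packages this job installs):

    # List namespace block devices from nvme-cli's JSON output.
    sudo nvme list -o json | jq -r '.Devices[].DevicePath'
    # -> /dev/nvme0n1 ... /dev/nvme3n1, matching the logged new_devs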
2026-04-15T14:10:24.363 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_DAEMON_PLACE_FAIL', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': '187293b0588135c3607a12257332b6880af4eff9', 'cephadm_binary_url': 'https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm', 'containers': {'image': 'harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5'}} 2026-04-15T14:10:24.363 INFO:tasks.cephadm:Provided image contains tag or digest, using it as is 2026-04-15T14:10:24.363 INFO:tasks.cephadm:Cluster image is harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 2026-04-15T14:10:24.363 INFO:tasks.cephadm:Cluster fsid is d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:10:24.363 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-04-15T14:10:24.363 INFO:tasks.cephadm:No mon roles; fabricating mons 2026-04-15T14:10:24.363 INFO:tasks.cephadm:Monitor IPs: {'mon.vm04': '192.168.123.104', 'mon.vm05': '192.168.123.105'} 2026-04-15T14:10:24.363 INFO:tasks.cephadm:Normalizing hostnames... 2026-04-15T14:10:24.363 DEBUG:teuthology.orchestra.run.vm04:> sudo hostname $(hostname -s) 2026-04-15T14:10:24.396 DEBUG:teuthology.orchestra.run.vm05:> sudo hostname $(hostname -s) 2026-04-15T14:10:24.426 INFO:tasks.cephadm:Downloading cephadm from url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm 2026-04-15T14:10:24.426 DEBUG:teuthology.orchestra.run.vm04:> curl --silent -L https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-04-15T14:10:26.269 INFO:teuthology.orchestra.run.vm04.stdout:-rw-r--r--. 1 ubuntu ubuntu 1036391 Apr 15 14:10 /home/ubuntu/cephtest/cephadm 2026-04-15T14:10:26.270 DEBUG:teuthology.orchestra.run.vm05:> curl --silent -L https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-04-15T14:10:28.842 INFO:teuthology.orchestra.run.vm05.stdout:-rw-r--r--. 1 ubuntu ubuntu 1036391 Apr 15 14:10 /home/ubuntu/cephtest/cephadm 2026-04-15T14:10:28.842 DEBUG:teuthology.orchestra.run.vm04:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-04-15T14:10:28.864 DEBUG:teuthology.orchestra.run.vm05:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-04-15T14:10:28.885 INFO:tasks.cephadm:Pulling image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 on all hosts... 2026-04-15T14:10:28.885 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 pull 2026-04-15T14:10:28.907 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 pull 2026-04-15T14:10:29.115 INFO:teuthology.orchestra.run.vm04.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5... 
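The cephadm fetch above pairs the download with a size sanity check, so a truncated transfer or an HTML error page never gets marked executable; condensed:

    # Fetch the standalone cephadm binary and refuse anything suspiciously small.
    url=https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
    curl --silent -L "$url" > cephadm
    test -s cephadm && test "$(stat -c%s cephadm)" -gt 1000 && chmod +x cephadm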
2026-04-15T14:10:29.123 INFO:teuthology.orchestra.run.vm05.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5... 2026-04-15T14:11:08.064 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-04-15T14:11:08.065 INFO:teuthology.orchestra.run.vm05.stdout: "ceph_version": "ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable)", 2026-04-15T14:11:08.065 INFO:teuthology.orchestra.run.vm05.stdout: "image_id": "259950fb12cb763f6889e1e4c320167a5351669158cfdd94a1086a8bb5694c2e", 2026-04-15T14:11:08.065 INFO:teuthology.orchestra.run.vm05.stdout: "repo_digests": [ 2026-04-15T14:11:08.065 INFO:teuthology.orchestra.run.vm05.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9" 2026-04-15T14:11:08.065 INFO:teuthology.orchestra.run.vm05.stdout: ] 2026-04-15T14:11:08.065 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-04-15T14:11:09.685 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-04-15T14:11:09.685 INFO:teuthology.orchestra.run.vm04.stdout: "ceph_version": "ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable)", 2026-04-15T14:11:09.685 INFO:teuthology.orchestra.run.vm04.stdout: "image_id": "259950fb12cb763f6889e1e4c320167a5351669158cfdd94a1086a8bb5694c2e", 2026-04-15T14:11:09.685 INFO:teuthology.orchestra.run.vm04.stdout: "repo_digests": [ 2026-04-15T14:11:09.685 INFO:teuthology.orchestra.run.vm04.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9" 2026-04-15T14:11:09.685 INFO:teuthology.orchestra.run.vm04.stdout: ] 2026-04-15T14:11:09.686 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-04-15T14:11:09.709 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /etc/ceph 2026-04-15T14:11:09.741 DEBUG:teuthology.orchestra.run.vm05:> sudo mkdir -p /etc/ceph 2026-04-15T14:11:09.768 DEBUG:teuthology.orchestra.run.vm04:> sudo chmod 777 /etc/ceph 2026-04-15T14:11:09.808 DEBUG:teuthology.orchestra.run.vm05:> sudo chmod 777 /etc/ceph 2026-04-15T14:11:09.838 INFO:tasks.cephadm:Writing seed config... 
2026-04-15T14:11:09.839 INFO:tasks.cephadm: override: [mgr] debug mgr = 20 2026-04-15T14:11:09.839 INFO:tasks.cephadm: override: [mgr] debug ms = 1 2026-04-15T14:11:09.839 INFO:tasks.cephadm: override: [mon] debug mon = 20 2026-04-15T14:11:09.839 INFO:tasks.cephadm: override: [mon] debug ms = 1 2026-04-15T14:11:09.839 INFO:tasks.cephadm: override: [mon] debug paxos = 20 2026-04-15T14:11:09.839 INFO:tasks.cephadm: override: [osd] debug ms = 1 2026-04-15T14:11:09.839 INFO:tasks.cephadm: override: [osd] debug osd = 20 2026-04-15T14:11:09.839 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000 2026-04-15T14:11:09.839 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True 2026-04-15T14:11:09.839 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-15T14:11:09.839 DEBUG:teuthology.orchestra.run.vm04:> dd of=/home/ubuntu/cephtest/seed.ceph.conf 2026-04-15T14:11:09.864 DEBUG:tasks.cephadm:Final config: [global] # make logging friendly to teuthology log_to_file = true log_to_stderr = false log to journald = false mon cluster log to file = true mon cluster log file level = debug mon clock drift allowed = 1.000 # replicate across OSDs, not hosts osd crush chooseleaf type = 0 #osd pool default size = 2 osd pool default erasure code profile = plugin=isa technique=reed_sol_van k=2 m=1 crush-failure-domain=osd # enable some debugging auth debug = true ms die on old message = true ms die on bug = true debug asserts on shutdown = true # adjust warnings mon max pg per osd = 10000# >= luminous mon pg warn max object skew = 0 mon osd allow primary affinity = true mon osd allow pg remap = true mon warn on legacy crush tunables = false mon warn on crush straw calc version zero = false mon warn on no sortbitwise = false mon warn on osd down out interval zero = false mon warn on too few osds = false mon_warn_on_pool_pg_num_not_power_of_two = false # disable pg_autoscaler by default for new pools osd_pool_default_pg_autoscale_mode = off # tests delete pools mon allow pool delete = true fsid = d89dc7c6-38d4-11f1-aa58-cd98464f39ae [osd] osd scrub load threshold = 5.0 osd scrub max interval = 600 osd mclock profile = high_recovery_ops osd recover clone overlap = true osd recovery max chunk = 1048576 osd deep scrub update digest min age = 30 osd map max advance = 10 osd memory target autotune = true # debugging osd debug shutdown = true osd debug op order = true osd debug verify stray on activate = true osd debug pg log writeout = true osd debug verify cached snaps = true osd debug verify missing on start = true osd debug misdirected ops = true osd op queue = debug_random osd op queue cut off = debug_random osd shutdown pgref assert = True bdev debug aio = true osd sloppy crc = true debug ms = 1 debug osd = 20 osd mclock iops capacity threshold hdd = 49000 [mgr] mon reweight min pgs per osd = 4 mon reweight min bytes per osd = 10 mgr/telemetry/nag = false debug mgr = 20 debug ms = 1 [mon] mon data avail warn = 5 mon mgr mkfs grace = 240 mon reweight min pgs per osd = 4 mon osd reporter subtree level = osd mon osd prime pg temp = true mon reweight min bytes per osd = 10 # rotate auth tickets quickly to exercise renewal paths auth mon ticket ttl = 660# 11m auth service ticket ttl = 240# 4m # don't complain about global id reclaim mon_warn_on_insecure_global_id_reclaim = false mon_warn_on_insecure_global_id_reclaim_allowed = false debug mon = 20 debug ms = 1 debug paxos = 20 [client.rgw] rgw cache enabled = true rgw enable ops log = true rgw enable usage log = true 
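The seed config is streamed to the node over stdin, which is why the logged command is a bare `dd of=.../seed.ceph.conf`. A trimmed sketch of the same write, keeping only a few representative lines from the full config dumped above:

    # Write a minimal seed config for bootstrap; values mirror the logged overrides.
    dd of=/home/ubuntu/cephtest/seed.ceph.conf <<'EOF'
    [global]
    fsid = d89dc7c6-38d4-11f1-aa58-cd98464f39ae
    osd crush chooseleaf type = 0        # replicate across OSDs, not hosts (2-node job)
    mon allow pool delete = true         # tests delete pools
    [mon]
    debug mon = 20
    debug ms = 1
    debug paxos = 20
    [client.rgw]
    rgw enable ops log = true
    rgw enable usage log = true
    EOF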
2026-04-15T14:11:09.865 DEBUG:teuthology.orchestra.run.vm04:mon.vm04> sudo journalctl -f -n 0 -u ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm04.service 2026-04-15T14:11:09.906 INFO:tasks.cephadm:Bootstrapping... 2026-04-15T14:11:09.906 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 -v bootstrap --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.104 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring 2026-04-15T14:11:10.069 INFO:teuthology.orchestra.run.vm04.stdout:-------------------------------------------------------------------------------- 2026-04-15T14:11:10.069 INFO:teuthology.orchestra.run.vm04.stdout:cephadm ['--image', 'harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5', '-v', 'bootstrap', '--fsid', 'd89dc7c6-38d4-11f1-aa58-cd98464f39ae', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.104', '--skip-admin-label'] 2026-04-15T14:11:10.069 INFO:teuthology.orchestra.run.vm04.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts. 2026-04-15T14:11:10.069 INFO:teuthology.orchestra.run.vm04.stdout:Verifying podman|docker is present... 2026-04-15T14:11:10.094 INFO:teuthology.orchestra.run.vm04.stdout:/bin/podman: stdout 5.8.0 2026-04-15T14:11:10.094 INFO:teuthology.orchestra.run.vm04.stdout:Verifying lvm2 is present... 2026-04-15T14:11:10.094 INFO:teuthology.orchestra.run.vm04.stdout:Verifying time synchronization is in place... 2026-04-15T14:11:10.102 INFO:teuthology.orchestra.run.vm04.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-04-15T14:11:10.102 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-04-15T14:11:10.109 INFO:teuthology.orchestra.run.vm04.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-04-15T14:11:10.109 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stdout inactive 2026-04-15T14:11:10.116 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stdout enabled 2026-04-15T14:11:10.123 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stdout active 2026-04-15T14:11:10.123 INFO:teuthology.orchestra.run.vm04.stdout:Unit chronyd.service is enabled and running 2026-04-15T14:11:10.123 INFO:teuthology.orchestra.run.vm04.stdout:Repeating the final host check... 
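Stripped of test-harness paths, the bootstrap call is: pin the container image, pass the fsid and seed config, write the admin config and keyring where clients expect them, and skip the admin label so the harness can place daemons itself. Condensed from the logged invocation:

    # Bootstrap a one-mon cluster from the seed config; flags match the logged run.
    sudo ./cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 \
        -v bootstrap \
        --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae \
        --config seed.ceph.conf \
        --output-config /etc/ceph/ceph.conf \
        --output-keyring /etc/ceph/ceph.client.admin.keyring \
        --output-pub-ssh-key ceph.pub \
        --mon-ip 192.168.123.104 \
        --skip-admin-label
    sudo chmod +r /etc/ceph/ceph.client.admin.keyring   # let the test user read it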
2026-04-15T14:11:10.144 INFO:teuthology.orchestra.run.vm04.stdout:/bin/podman: stdout 5.8.0 2026-04-15T14:11:10.144 INFO:teuthology.orchestra.run.vm04.stdout:podman (/bin/podman) version 5.8.0 is present 2026-04-15T14:11:10.144 INFO:teuthology.orchestra.run.vm04.stdout:systemctl is present 2026-04-15T14:11:10.144 INFO:teuthology.orchestra.run.vm04.stdout:lvcreate is present 2026-04-15T14:11:10.151 INFO:teuthology.orchestra.run.vm04.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-04-15T14:11:10.151 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-04-15T14:11:10.157 INFO:teuthology.orchestra.run.vm04.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-04-15T14:11:10.158 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stdout inactive 2026-04-15T14:11:10.165 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stdout enabled 2026-04-15T14:11:10.171 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stdout active 2026-04-15T14:11:10.171 INFO:teuthology.orchestra.run.vm04.stdout:Unit chronyd.service is enabled and running 2026-04-15T14:11:10.171 INFO:teuthology.orchestra.run.vm04.stdout:Host looks OK 2026-04-15T14:11:10.171 INFO:teuthology.orchestra.run.vm04.stdout:Cluster fsid: d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:11:10.171 INFO:teuthology.orchestra.run.vm04.stdout:Acquiring lock 140236449112464 on /run/cephadm/d89dc7c6-38d4-11f1-aa58-cd98464f39ae.lock 2026-04-15T14:11:10.172 INFO:teuthology.orchestra.run.vm04.stdout:Lock 140236449112464 acquired on /run/cephadm/d89dc7c6-38d4-11f1-aa58-cd98464f39ae.lock 2026-04-15T14:11:10.172 INFO:teuthology.orchestra.run.vm04.stdout:Verifying IP 192.168.123.104 port 3300 ... 2026-04-15T14:11:10.173 INFO:teuthology.orchestra.run.vm04.stdout:Verifying IP 192.168.123.104 port 6789 ... 
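The chrony noise in the host checks above is expected: on CentOS 9 the unit is chronyd.service, so the probe of chrony.service fails before the chronyd probe succeeds. A tolerant stand-alone check in the same spirit (the unit list is an assumption, not cephadm's exact probe order):

    # Probe the usual time-sync units; succeed on the first active one.
    for unit in chronyd.service chrony.service systemd-timesyncd.service ntpd.service; do
        systemctl is-active --quiet "$unit" && { echo "time sync via $unit"; exit 0; }
    done
    echo "no active time-sync daemon" >&2
    exit 1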
2026-04-15T14:11:10.173 INFO:teuthology.orchestra.run.vm04.stdout:Base mon IP(s) is [192.168.123.104:3300, 192.168.123.104:6789], mon addrv is [v2:192.168.123.104:3300,v1:192.168.123.104:6789] 2026-04-15T14:11:10.177 INFO:teuthology.orchestra.run.vm04.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.104 metric 100 2026-04-15T14:11:10.177 INFO:teuthology.orchestra.run.vm04.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.104 metric 100 2026-04-15T14:11:10.179 INFO:teuthology.orchestra.run.vm04.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium 2026-04-15T14:11:10.179 INFO:teuthology.orchestra.run.vm04.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-04-15T14:11:10.181 INFO:teuthology.orchestra.run.vm04.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-04-15T14:11:10.181 INFO:teuthology.orchestra.run.vm04.stdout:/sbin/ip: stdout inet6 ::1/128 scope host 2026-04-15T14:11:10.181 INFO:teuthology.orchestra.run.vm04.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-04-15T14:11:10.181 INFO:teuthology.orchestra.run.vm04.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000 2026-04-15T14:11:10.181 INFO:teuthology.orchestra.run.vm04.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:4/64 scope link noprefixroute 2026-04-15T14:11:10.181 INFO:teuthology.orchestra.run.vm04.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-04-15T14:11:10.182 INFO:teuthology.orchestra.run.vm04.stdout:Mon IP `192.168.123.104` is in CIDR network `192.168.123.0/24` 2026-04-15T14:11:10.182 INFO:teuthology.orchestra.run.vm04.stdout:Mon IP `192.168.123.104` is in CIDR network `192.168.123.0/24` 2026-04-15T14:11:10.182 INFO:teuthology.orchestra.run.vm04.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24'] 2026-04-15T14:11:10.183 INFO:teuthology.orchestra.run.vm04.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-04-15T14:11:10.183 INFO:teuthology.orchestra.run.vm04.stdout:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5... 2026-04-15T14:11:10.856 INFO:teuthology.orchestra.run.vm04.stdout:/bin/podman: stdout 259950fb12cb763f6889e1e4c320167a5351669158cfdd94a1086a8bb5694c2e 2026-04-15T14:11:10.857 INFO:teuthology.orchestra.run.vm04.stdout:/bin/podman: stderr Trying to pull harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5... 
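The public-network inference amounts to finding which locally configured prefix contains the mon IP. A rough equivalent with iproute2 (the awk parsing is illustrative, not what cephadm actually runs):

    # Print the configured CIDR whose address is the mon IP.
    mon_ip=192.168.123.104
    ip -o -f inet addr show | awk -v ip="$mon_ip" \
        'split($4, a, "/") && a[1] == ip { print $4 }'    # -> 192.168.123.104/24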
2026-04-15T14:11:10.857 INFO:teuthology.orchestra.run.vm04.stdout:/bin/podman: stderr Getting image source signatures 2026-04-15T14:11:10.857 INFO:teuthology.orchestra.run.vm04.stdout:/bin/podman: stderr Copying blob sha256:e7c2626263b9fd5eb96ee9f2a3a79ffbcd14bea8677a4ffb106c8a12841f5b7b 2026-04-15T14:11:10.857 INFO:teuthology.orchestra.run.vm04.stdout:/bin/podman: stderr Copying config sha256:259950fb12cb763f6889e1e4c320167a5351669158cfdd94a1086a8bb5694c2e 2026-04-15T14:11:10.857 INFO:teuthology.orchestra.run.vm04.stdout:/bin/podman: stderr Writing manifest to image destination 2026-04-15T14:11:11.138 INFO:teuthology.orchestra.run.vm04.stdout:ceph: stdout ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable) 2026-04-15T14:11:11.138 INFO:teuthology.orchestra.run.vm04.stdout:Ceph version: ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable) 2026-04-15T14:11:11.138 INFO:teuthology.orchestra.run.vm04.stdout:Extracting ceph user uid/gid from container image... 2026-04-15T14:11:11.254 INFO:teuthology.orchestra.run.vm04.stdout:stat: stdout 167 167 2026-04-15T14:11:11.254 INFO:teuthology.orchestra.run.vm04.stdout:Creating initial keys... 2026-04-15T14:11:11.382 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph-authtool: stdout AQB/nN9plEclFRAANiT887p6avrS/WElfJozDA== 2026-04-15T14:11:11.503 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph-authtool: stdout AQB/nN9pEP5bHBAAMJgXZ7u3I3UOw6Ilt++B8Q== 2026-04-15T14:11:11.633 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph-authtool: stdout AQB/nN9ps6OxIxAAogvTY6ZJoxQOAlqKDCIwxQ== 2026-04-15T14:11:11.633 INFO:teuthology.orchestra.run.vm04.stdout:Creating initial monmap... 2026-04-15T14:11:11.771 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap 2026-04-15T14:11:11.771 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = tentacle 2026-04-15T14:11:11.771 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:11:11.771 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-04-15T14:11:11.772 INFO:teuthology.orchestra.run.vm04.stdout:monmaptool for vm04 [v2:192.168.123.104:3300,v1:192.168.123.104:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-04-15T14:11:11.772 INFO:teuthology.orchestra.run.vm04.stdout:setting min_mon_release = tentacle 2026-04-15T14:11:11.772 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/monmaptool: set fsid to d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:11:11.772 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-04-15T14:11:11.772 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:11:11.772 INFO:teuthology.orchestra.run.vm04.stdout:Creating mon... 2026-04-15T14:11:11.933 INFO:teuthology.orchestra.run.vm04.stdout:create mon.vm04 on 2026-04-15T14:11:12.283 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 
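The key and monmap steps above can be replayed by hand: ceph-authtool generates the initial secrets, and monmaptool seeds an epoch-0 monmap with the cluster fsid and the v2/v1 address vector. A sketch mirroring the logged values, not necessarily the exact cephadm invocation:

    # Generate a secret the way the bootstrap seeds its keyrings.
    ceph-authtool --gen-print-key
    # Build the epoch-0 monmap with the fsid and addrv shown in the log.
    monmaptool --create --clobber \
        --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae \
        --addv vm04 '[v2:192.168.123.104:3300,v1:192.168.123.104:6789]' \
        /tmp/monmap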
2026-04-15T14:11:12.431 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae.target → /etc/systemd/system/ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae.target. 2026-04-15T14:11:12.431 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae.target → /etc/systemd/system/ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae.target. 2026-04-15T14:11:12.610 INFO:teuthology.orchestra.run.vm04.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm04 2026-04-15T14:11:12.610 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr Failed to reset failed state of unit ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm04.service: Unit ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm04.service not loaded. 2026-04-15T14:11:12.775 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae.target.wants/ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm04.service → /etc/systemd/system/ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@.service. 2026-04-15T14:11:12.983 INFO:teuthology.orchestra.run.vm04.stdout:firewalld does not appear to be present 2026-04-15T14:11:12.983 INFO:teuthology.orchestra.run.vm04.stdout:Not possible to enable service . firewalld.service is not available 2026-04-15T14:11:12.983 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for mon to start... 2026-04-15T14:11:12.983 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for mon... 2026-04-15T14:11:13.015 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:13 vm04 ceph-mon[53029]: mkfs d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:11:13.266 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:13 vm04 ceph-mon[53029]: mon.vm04 is new leader, mons vm04 in quorum (ranks 0) 2026-04-15T14:11:13.320 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout cluster: 2026-04-15T14:11:13.320 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout id: d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:11:13.320 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout health: HEALTH_OK 2026-04-15T14:11:13.320 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout 2026-04-15T14:11:13.320 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout services: 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum vm04 (age 0.277528s) [leader: vm04] 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mgr: no daemons active 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout data: 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout pgs: 2026-04-15T14:11:13.321 
INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:mon is available 2026-04-15T14:11:13.321 INFO:teuthology.orchestra.run.vm04.stdout:Assimilating anything we can from ceph.conf... 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout [global] 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout fsid = d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.104:3300,v1:192.168.123.104:6789] 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout [mgr] 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-04-15T14:11:13.643 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout 2026-04-15T14:11:13.644 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout [osd] 2026-04-15T14:11:13.644 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-04-15T14:11:13.644 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-04-15T14:11:13.644 INFO:teuthology.orchestra.run.vm04.stdout:Generating new minimal ceph.conf... 2026-04-15T14:11:13.975 INFO:teuthology.orchestra.run.vm04.stdout:Restarting the monitor... 2026-04-15T14:11:14.020 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:13 vm04 systemd[1]: Stopping Ceph mon.vm04 for d89dc7c6-38d4-11f1-aa58-cd98464f39ae... 
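Assimilation folds any options found in the local ceph.conf into the mon's centralized config store, after which a minimal client-side config is regenerated; both are plain ceph commands and can be rerun manually:

    # Fold local ceph.conf options into the mon config store...
    ceph config assimilate-conf -i /etc/ceph/ceph.conf
    # ...then emit the minimal config that cephadm writes back to /etc/ceph/ceph.conf.
    ceph config generate-minimal-conf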
2026-04-15T14:11:14.279 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: mon.vm04 is new leader, mons vm04 in quorum (ranks 0) 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: monmap epoch 1 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: last_changed 2026-04-15T14:11:11.742276+0000 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: created 2026-04-15T14:11:11.742276+0000 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: min_mon_release 20 (tentacle) 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: election_strategy: 1 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: 0: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.vm04 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: fsmap 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: osdmap e1: 0 total, 0 up, 0 in 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: mgrmap e1: no daemons active 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: from='client.? 192.168.123.104:0/623590724' entity='client.admin' cmd={"prefix": "status"} : dispatch 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: from='client.? 192.168.123.104:0/3661827347' entity='client.admin' cmd={"prefix": "config assimilate-conf"} : dispatch 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: from='client.? 192.168.123.104:0/3661827347' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53029]: from='client.? 
192.168.123.104:0/898021625' entity='client.admin' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04[53025]: 2026-04-15T14:11:14.089+0000 7fe760603640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm04 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04[53025]: 2026-04-15T14:11:14.089+0000 7fe760603640 -1 mon.vm04@0(leader) e1 *** Got Signal Terminated *** 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 podman[53242]: 2026-04-15 14:11:14.151529393 +0000 UTC m=+0.075832980 container died 61ce9caa9748b323c660a32c413b3db87880a1b2aa2d81df09e4d208238bf82c (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-19-g7ec4401a095, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 podman[53242]: 2026-04-15 14:11:14.169774189 +0000 UTC m=+0.094077776 container remove 61ce9caa9748b323c660a32c413b3db87880a1b2aa2d81df09e4d208238bf82c (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-19-g7ec4401a095) 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 bash[53242]: ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 systemd[1]: ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm04.service: Deactivated successfully. 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 systemd[1]: Stopped Ceph mon.vm04 for d89dc7c6-38d4-11f1-aa58-cd98464f39ae. 2026-04-15T14:11:14.280 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 systemd[1]: Starting Ceph mon.vm04 for d89dc7c6-38d4-11f1-aa58-cd98464f39ae... 
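The "Got Signal Terminated" message is expected here: "Restarting the monitor..." stops the fsid-qualified systemd unit, podman-init forwards SIGTERM to ceph-mon (PID 1 inside the container), the container is removed, and systemd starts a fresh one. At the systemd level this amounts to the following (unit name taken from the log):

    systemctl restart ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm04.service
    # Follow the stop/start sequence seen above:
    journalctl -u ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm04.service -n 20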
2026-04-15T14:11:14.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 podman[53325]: 2026-04-15 14:11:14.360354799 +0000 UTC m=+0.014298122 image pull 259950fb12cb763f6889e1e4c320167a5351669158cfdd94a1086a8bb5694c2e harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 2026-04-15T14:11:14.795 INFO:teuthology.orchestra.run.vm04.stdout:Setting public_network to 192.168.123.0/24 in global config section 2026-04-15T14:11:14.873 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 podman[53325]: 2026-04-15 14:11:14.670445922 +0000 UTC m=+0.324389225 container create 6257b904a43573c45c7e4cade5de85efd85107bb4e3c97aa3792b1da2d0f3e6f (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-19-g7ec4401a095, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86) 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 podman[53325]: 2026-04-15 14:11:14.777674109 +0000 UTC m=+0.431617433 container init 6257b904a43573c45c7e4cade5de85efd85107bb4e3c97aa3792b1da2d0f3e6f (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-19-g7ec4401a095, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 podman[53325]: 2026-04-15 14:11:14.784563554 +0000 UTC m=+0.438506867 container start 6257b904a43573c45c7e4cade5de85efd85107bb4e3c97aa3792b1da2d0f3e6f (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-19-g7ec4401a095, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 bash[53325]: 6257b904a43573c45c7e4cade5de85efd85107bb4e3c97aa3792b1da2d0f3e6f 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 systemd[1]: Started Ceph mon.vm04 for d89dc7c6-38d4-11f1-aa58-cd98464f39ae. 
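Setting public_network goes through the same mon config store. The equivalent commands, with a readback to confirm what daemons will inherit (recall the earlier note that no --cluster-network was given, so OSD replication also uses this network):

    ceph config set global public_network 192.168.123.0/24
    ceph config get mon public_network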
2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: set uid:gid to 167:167 (ceph:ceph) 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable - RelWithDebInfo), process ceph-mon, pid 2 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: pidfile_write: ignore empty --pid-file 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: load: jerasure load: lrc 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: RocksDB version: 7.9.2 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Git sha 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Compile date 2026-04-14 11:30:02 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: DB SUMMARY 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: DB Session ID: 59CZ4PE15IA220HAVW41 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: CURRENT file: CURRENT 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: IDENTITY file: IDENTITY 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm04/store.db dir, Total Num: 1, files: 000008.sst 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm04/store.db: 000009.log size: 88971 ; 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.error_if_exists: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.create_if_missing: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.paranoid_checks: 1 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.flush_verify_memtable_count: 1 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.env: 0x5595a10f6440 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.fs: PosixFileSystem 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.info_log: 0x5595a2575300 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 
14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_file_opening_threads: 16 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.statistics: (nil) 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.use_fsync: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_log_file_size: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.log_file_time_to_roll: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.keep_log_file_num: 1000 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.recycle_log_file_num: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.allow_fallocate: 1 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.allow_mmap_reads: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.allow_mmap_writes: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.use_direct_reads: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.create_missing_column_families: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.db_log_dir: 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.wal_dir: 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.table_cache_numshardbits: 6 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.WAL_ttl_seconds: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.WAL_size_limit_MB: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.is_fd_close_on_exec: 1 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.advise_random_on_open: 1 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.db_write_buffer_size: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: 
Options.write_buffer_manager: 0x5595a2578500 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.use_adaptive_mutex: 0 2026-04-15T14:11:14.874 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.rate_limiter: (nil) 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.wal_recovery_mode: 2 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.enable_thread_tracking: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.enable_pipelined_write: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.unordered_write: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.row_cache: None 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.wal_filter: None 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.allow_ingest_behind: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.two_write_queues: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.manual_wal_flush: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.wal_compression: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.atomic_flush: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.persist_stats_to_disk: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: 
Options.write_dbid_to_manifest: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.log_readahead_size: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.best_efforts_recovery: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.allow_data_in_errors: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.db_host_id: __hostname__ 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.enforce_single_del_contracts: true 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_background_jobs: 2 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_background_compactions: -1 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_subcompactions: 1 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.delayed_write_rate : 16777216 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_total_wal_size: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.stats_dump_period_sec: 600 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.stats_persist_period_sec: 600 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_open_files: -1 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bytes_per_sync: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.wal_bytes_per_sync: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.strict_bytes_per_sync: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 
14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_readahead_size: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_background_flushes: -1 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Compression algorithms supported: 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: kZSTD supported: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: kXpressCompression supported: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: kBZip2Compression supported: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: kLZ4Compression supported: 1 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: kZlibCompression supported: 1 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: kLZ4HCCompression supported: 1 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: kSnappyCompression supported: 1 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Fast CRC32 supported: Supported on x86 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: DMutex implementation: pthread_mutex_t 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm04/store.db/MANIFEST-000010 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.merge_operator: 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_filter: None 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_filter_factory: None 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.sst_partitioner_factory: None 2026-04-15T14:11:14.875 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.memtable_factory: SkipListFactory 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.table_factory: BlockBasedTable 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5595a2574ec0) 2026-04-15T14:11:14.876 
INFO:journalctl@ceph.mon.vm04.vm04.stdout: cache_index_and_filter_blocks: 1 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: pin_top_level_index_and_filter: 1 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: index_type: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: data_block_index_type: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: index_shortening: 1 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: data_block_hash_table_util_ratio: 0.750000 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: checksum: 4 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: no_block_cache: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: block_cache: 0x5595a256b8d0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: block_cache_name: BinnedLRUCache 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: block_cache_options: 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: capacity : 536870912 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: num_shard_bits : 4 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: strict_capacity_limit : 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: high_pri_pool_ratio: 0.000 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: block_cache_compressed: (nil) 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: persistent_cache: (nil) 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: block_size: 4096 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: block_size_deviation: 10 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: block_restart_interval: 16 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: index_block_restart_interval: 1 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: metadata_block_size: 4096 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: partition_filters: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: use_delta_encoding: 1 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: filter_policy: bloomfilter 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: whole_key_filtering: 1 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: verify_compression: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: read_amp_bytes_per_bit: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: format_version: 5 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: enable_index_compression: 1 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: block_align: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: max_auto_readahead_size: 262144 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: prepopulate_block_cache: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: initial_auto_readahead_size: 8192 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout: num_file_reads_for_auto_readahead: 2 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: 
Options.write_buffer_size: 33554432 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_write_buffer_number: 2 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compression: NoCompression 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bottommost_compression: Disabled 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.prefix_extractor: nullptr 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.num_levels: 7 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compression_opts.window_bits: -14 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compression_opts.level: 32767 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compression_opts.strategy: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compression_opts.max_dict_bytes: 0 
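As an aside on this long options listing: the mon's RocksDB settings come from compiled-in defaults combined with ceph's mon_rocksdb_options string. A way to inspect or override those knobs — the option string below merely mirrors values visible in the dump (write_buffer_size=33554432, NoCompression, dynamic level bytes) and is illustrative, not a tuning recommendation:

    ceph config get mon mon_rocksdb_options
    # Example override echoing the dumped values; assumed-equivalent to the defaults.
    ceph config set mon mon_rocksdb_options \
        'write_buffer_size=33554432,compression=kNoCompression,level_compaction_dynamic_level_bytes=true'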
2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-04-15T14:11:14.876 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compression_opts.enabled: false 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.target_file_size_base: 67108864 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.target_file_size_multiplier: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-04-15T14:11:14.877 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.arena_block_size: 1048576 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.disable_auto_compactions: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.inplace_update_support: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.inplace_update_num_locks: 10000 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.memtable_huge_page_size: 0 2026-04-15T14:11:14.877 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.bloom_locality: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.max_successive_merges: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.optimize_filters_for_hits: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.paranoid_file_checks: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.force_consistency_checks: 1 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.report_bg_io_stats: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.ttl: 2592000 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.periodic_compaction_seconds: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.enable_blob_files: false 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.min_blob_size: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.blob_file_size: 268435456 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.blob_compression_type: NoCompression 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.enable_blob_garbage_collection: false 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.blob_file_starting_level: 0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm04/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 
0), log number is 5 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 1cc4d617-8c12-4d12-b97b-53222cf127d0 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776262274813451, "job": 1, "event": "recovery_started", "wal_files": [9]} 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2 2026-04-15T14:11:14.877 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776262274815190, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 85963, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 248, "table_properties": {"data_size": 84122, "index_size": 230, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 581, "raw_key_size": 10122, "raw_average_key_size": 47, "raw_value_size": 78319, "raw_average_value_size": 364, "num_data_blocks": 11, "num_entries": 215, "num_filter_entries": 215, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1776262274, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "1cc4d617-8c12-4d12-b97b-53222cf127d0", "db_session_id": "59CZ4PE15IA220HAVW41", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}} 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776262274815247, "job": 1, "event": "recovery_finished"} 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [db/version_set.cc:5047] Creating manifest 15 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm04/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5595a2596e00 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: DB pointer 0x5595a26e2000 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: 
** DB Stats **
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: ** Compaction Stats [default] **
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: L0 2/0 85.82 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 53.6 0.00 0.00 1 0.002 0 0 0.0 0.0
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Sum 2/0 85.82 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 53.6 0.00 0.00 1 0.002 0 0 0.0 0.0
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 53.6 0.00 0.00 1 0.002 0 0 0.0 0.0
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: ** Compaction Stats [default] **
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 53.6 0.00 0.00 1 0.002 0 0 0.0 0.0
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: AddFile(GB):
cumulative 0.000, interval 0.000 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: AddFile(Keys): cumulative 0, interval 0 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Cumulative compaction: 0.00 GB write, 10.07 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Interval compaction: 0.00 GB write, 10.07 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Block cache BinnedLRUCache@0x5595a256b8d0#2 capacity: 512.00 MB usage: 1.19 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.3e-05 secs_since: 0 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: Block cache entry stats(count,size,portion): FilterBlock(2,0.77 KB,0.000146031%) IndexBlock(2,0.42 KB,8.04663e-05%) Misc(1,0.00 KB,0%) 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: ** File Read Latency Histogram By Level [default] ** 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: starting mon.vm04 rank 0 at public addrs [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] at bind addrs [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon_data /var/lib/ceph/mon/ceph-vm04 fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: mon.vm04@-1(???) 
e1 preinit fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: mon.vm04@-1(???).mds e1 new map 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: mon.vm04@-1(???).mds e1 print_map 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: e1 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: btime 2026-04-15T14:11:13:006993+0000 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: legacy client fscid: -1 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout: No filesystems configured 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: mon.vm04@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: mon.vm04@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: mon.vm04@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: mon.vm04@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-04-15T14:11:14.878 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: mon.vm04@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3 2026-04-15T14:11:15.103 INFO:teuthology.orchestra.run.vm04.stdout:Wrote config to /etc/ceph/ceph.conf 2026-04-15T14:11:15.105 INFO:teuthology.orchestra.run.vm04.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-04-15T14:11:15.106 INFO:teuthology.orchestra.run.vm04.stdout:Creating mgr... 2026-04-15T14:11:15.106 INFO:teuthology.orchestra.run.vm04.stdout:Verifying port 0.0.0.0:9283 ... 2026-04-15T14:11:15.106 INFO:teuthology.orchestra.run.vm04.stdout:Verifying port 0.0.0.0:8765 ... 2026-04-15T14:11:15.106 INFO:teuthology.orchestra.run.vm04.stdout:Verifying port 0.0.0.0:8443 ... 
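The "Verifying port" lines above are cephadm checking that the mgr's default ports are free before creating the daemon (9283 is the prometheus module, 8443 the dashboard; 8765 appears to be the service-discovery endpoint). A rough shell sketch of the same probe, for illustration only; cephadm does this in Python, and `ss` being present on the node is an assumption:

    # Hypothetical re-implementation of the port probe seen above; not cephadm's code.
    for port in 9283 8765 8443; do
        if ss -ltn | awk '{print $4}' | grep -q ":${port}$"; then
            echo "port ${port} already in use" >&2
            exit 1
        fi
    done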
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: mon.vm04 is new leader, mons vm04 in quorum (ranks 0)
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: monmap epoch 1
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: last_changed 2026-04-15T14:11:11.742276+0000
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: created 2026-04-15T14:11:11.742276+0000
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: min_mon_release 20 (tentacle)
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: election_strategy: 1
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: 0: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.vm04
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: fsmap
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: osdmap e1: 0 total, 0 up, 0 in
2026-04-15T14:11:15.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:14 vm04 ceph-mon[53345]: mgrmap e1: no daemons active
2026-04-15T14:11:15.264 INFO:teuthology.orchestra.run.vm04.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mgr.vm04.ycniad
2026-04-15T14:11:15.264 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr Failed to reset failed state of unit ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mgr.vm04.ycniad.service: Unit ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mgr.vm04.ycniad.service not loaded.
2026-04-15T14:11:15.393 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae.target.wants/ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mgr.vm04.ycniad.service → /etc/systemd/system/ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@.service.
2026-04-15T14:11:15.570 INFO:teuthology.orchestra.run.vm04.stdout:firewalld does not appear to be present
2026-04-15T14:11:15.570 INFO:teuthology.orchestra.run.vm04.stdout:Not possible to enable service . firewalld.service is not available
2026-04-15T14:11:15.570 INFO:teuthology.orchestra.run.vm04.stdout:firewalld does not appear to be present
2026-04-15T14:11:15.570 INFO:teuthology.orchestra.run.vm04.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available
2026-04-15T14:11:15.570 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for mgr to start...
2026-04-15T14:11:15.570 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for mgr...
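The non-zero exit from `systemctl reset-failed` above is expected on a fresh host: the mgr unit has never been loaded, so there is no failed state to clear, and deployment continues. One way to express the same tolerant sequence in shell (unit name taken from the log; the `|| true` stands in for the tolerance cephadm applies internally):

    # Clear any stale failed state, ignoring "Unit ... not loaded." on first deploy.
    unit="ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mgr.vm04.ycniad.service"
    systemctl reset-failed "$unit" || true
    systemctl enable "$unit"    # creates the target.wants symlink seen above
    systemctl start "$unit"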
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout {
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "fsid": "d89dc7c6-38d4-11f1-aa58-cd98464f39ae",
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "health": {
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "checks": {},
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "mutes": []
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "quorum": [
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout 0
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout ],
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "vm04"
2026-04-15T14:11:15.897 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout ],
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "quorum_age": 1,
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "monmap": {
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle",
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-04-15T14:11:15.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "btime": "2026-04-15T14:11:13:006993+0000",
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "available": false,
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "modules": [
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "iostat",
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "nfs"
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout ],
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "services": {}
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "modified": "2026-04-15T14:11:13.007723+0000",
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "services": {}
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout }
2026-04-15T14:11:15.899 INFO:teuthology.orchestra.run.vm04.stdout:mgr not available, waiting (1/15)...
2026-04-15T14:11:16.135 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:16 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1160636652' entity='client.admin'
2026-04-15T14:11:16.135 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:16 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3546525020' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T14:11:18.283 INFO:teuthology.orchestra.run.vm04.stdout:mgr not available, waiting (2/15)...
2026-04-15T14:11:18.473 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:18 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/420258061' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T14:11:20.987 INFO:teuthology.orchestra.run.vm04.stdout:mgr not available, waiting (3/15)...
2026-04-15T14:11:21.021 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:20 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/2345249986' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T14:11:23.347 INFO:teuthology.orchestra.run.vm04.stdout:mgr not available, waiting (4/15)...
2026-04-15T14:11:23.545 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:23 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/725383670' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T14:11:25.696 INFO:teuthology.orchestra.run.vm04.stdout:mgr not available, waiting (5/15)...
2026-04-15T14:11:25.814 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:25 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3885778167' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: Activating manager daemon vm04.ycniad
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: mgrmap e2: vm04.ycniad(active, starting, since 0.00507585s)
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: from='mgr.14100 192.168.123.104:0/3384245392' entity='mgr.vm04.ycniad' cmd={"prefix": "mds metadata"} : dispatch
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: from='mgr.14100 192.168.123.104:0/3384245392' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata"} : dispatch
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: from='mgr.14100 192.168.123.104:0/3384245392' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata"} : dispatch
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: from='mgr.14100 192.168.123.104:0/3384245392' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: from='mgr.14100 192.168.123.104:0/3384245392' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr metadata", "who": "vm04.ycniad", "id": "vm04.ycniad"} : dispatch
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: Manager daemon vm04.ycniad is now available
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: from='mgr.14100 192.168.123.104:0/3384245392' entity='mgr.vm04.ycniad' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm04.ycniad/mirror_snapshot_schedule"} : dispatch
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: from='mgr.14100 192.168.123.104:0/3384245392' entity='mgr.vm04.ycniad'
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: from='mgr.14100 192.168.123.104:0/3384245392' entity='mgr.vm04.ycniad' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm04.ycniad/trash_purge_schedule"} : dispatch
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: from='mgr.14100 192.168.123.104:0/3384245392' entity='mgr.vm04.ycniad'
2026-04-15T14:11:26.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:26 vm04 ceph-mon[53345]: from='mgr.14100 192.168.123.104:0/3384245392' entity='mgr.vm04.ycniad'
2026-04-15T14:11:28.127 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:28 vm04 ceph-mon[53345]: mgrmap e3: vm04.ycniad(active, since 1.00953s)
2026-04-15T14:11:28.147 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout
2026-04-15T14:11:28.147 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout {
2026-04-15T14:11:28.147 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "fsid": "d89dc7c6-38d4-11f1-aa58-cd98464f39ae",
2026-04-15T14:11:28.147 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "health": {
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "checks": {},
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "mutes": []
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "quorum": [
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout 0
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout ],
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "vm04"
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout ],
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "quorum_age": 13,
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "monmap": {
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T14:11:28.148 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle",
2026-04-15T14:11:28.149 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "btime": "2026-04-15T14:11:13:006993+0000",
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "available": true,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "modules": [
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "iostat",
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "nfs"
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout ],
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "services": {}
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "modified": "2026-04-15T14:11:13.007723+0000",
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "services": {}
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout },
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout }
2026-04-15T14:11:28.150 INFO:teuthology.orchestra.run.vm04.stdout:mgr is available
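The "mgr not available, waiting (N/15)..." attempts above are the bootstrap polling `ceph status` until `mgrmap.available` flips to true, giving up after 15 tries. A minimal shell sketch of that loop; `jq` and the 2-second sleep are assumptions for illustration, not the actual cephadm implementation:

    # Poll cluster status until the mgr reports available, max 15 attempts.
    for i in $(seq 1 15); do
        if [ "$(ceph status --format json | jq -r '.mgrmap.available')" = "true" ]; then
            echo "mgr is available"
            break
        fi
        echo "mgr not available, waiting (${i}/15)..."
        sleep 2
    done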
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout [global]
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout fsid = d89dc7c6-38d4-11f1-aa58-cd98464f39ae
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.104:3300,v1:192.168.123.104:6789]
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout [mgr]
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout [osd]
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-04-15T14:11:28.746 INFO:teuthology.orchestra.run.vm04.stdout:Enabling cephadm module...
2026-04-15T14:11:29.426 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:29 vm04 ceph-mon[53345]: mgrmap e4: vm04.ycniad(active, since 2s)
2026-04-15T14:11:29.426 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:29 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3760901419' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T14:11:29.426 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:29 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1409249474' entity='client.admin' cmd={"prefix": "config assimilate-conf"} : dispatch
2026-04-15T14:11:30.173 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout {
2026-04-15T14:11:30.173 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "epoch": 5,
2026-04-15T14:11:30.173 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "available": true,
2026-04-15T14:11:30.173 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "active_name": "vm04.ycniad",
2026-04-15T14:11:30.173 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-04-15T14:11:30.173 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout }
2026-04-15T14:11:30.173 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for the mgr to restart...
2026-04-15T14:11:30.173 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for mgr epoch 5...
2026-04-15T14:11:30.434 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:30 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3091706829' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "cephadm"} : dispatch
2026-04-15T14:11:30.435 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:30 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3091706829' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
2026-04-15T14:11:30.435 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:30 vm04 ceph-mon[53345]: mgrmap e5: vm04.ycniad(active, since 3s)
2026-04-15T14:11:30.435 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:30 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1150125957' entity='client.admin' cmd={"prefix": "mgr stat"} : dispatch
2026-04-15T14:11:40.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:40 vm04 ceph-mon[53345]: Active manager daemon vm04.ycniad restarted
2026-04-15T14:11:40.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:40 vm04 ceph-mon[53345]: Activating manager daemon vm04.ycniad
2026-04-15T14:11:40.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:40 vm04 ceph-mon[53345]: osdmap e2: 0 total, 0 up, 0 in
2026-04-15T14:11:40.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:40 vm04 ceph-mon[53345]: mgrmap e6: vm04.ycniad(active, starting, since 0.00517199s)
2026-04-15T14:11:40.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:40 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-15T14:11:40.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:40 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr metadata", "who": "vm04.ycniad", "id": "vm04.ycniad"} : dispatch
2026-04-15T14:11:40.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:40 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix": "mds metadata"} : dispatch
2026-04-15T14:11:40.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:40 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata"} : dispatch
2026-04-15T14:11:40.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:40 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata"} : dispatch
2026-04-15T14:11:40.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:40 vm04 ceph-mon[53345]: Manager daemon vm04.ycniad is now available
2026-04-15T14:11:41.009 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout {
2026-04-15T14:11:41.010 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7,
2026-04-15T14:11:41.010 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "initialized": true
2026-04-15T14:11:41.010 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout }
2026-04-15T14:11:41.010 INFO:teuthology.orchestra.run.vm04.stdout:mgr epoch 5 is available
2026-04-15T14:11:41.010 INFO:teuthology.orchestra.run.vm04.stdout:Verifying orchestrator module is enabled...
2026-04-15T14:11:42.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: mgrmap e7: vm04.ycniad(active, since 1.00924s)
2026-04-15T14:11:42.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad'
2026-04-15T14:11:42.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad'
2026-04-15T14:11:42.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: Found migration_current of "None". Setting to last migration.
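The mon dispatch lines above and just below ("config assimilate-conf", "mgr module enable", "orch set backend") map one-to-one onto plain CLI calls, so the same bootstrap steps can be reproduced by hand; the conf path is illustrative:

    # Fold the generated ceph.conf into the mon config database,
    # then turn on the cephadm orchestrator backend.
    ceph config assimilate-conf -i /etc/ceph/ceph.conf
    ceph mgr module enable cephadm
    ceph orch set backend cephadm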
2026-04-15T14:11:42.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:42.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:42.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:11:42.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:11:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm04.ycniad/mirror_snapshot_schedule"} : dispatch 2026-04-15T14:11:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm04.ycniad/trash_purge_schedule"} : dispatch 2026-04-15T14:11:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:41 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/2620271772' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "orchestrator"} : dispatch 2026-04-15T14:11:42.298 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stderr module 'orchestrator' is already enabled (always-on) 2026-04-15T14:11:42.298 INFO:teuthology.orchestra.run.vm04.stdout:Setting orchestrator backend to cephadm... 2026-04-15T14:11:43.122 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:42 vm04 ceph-mon[53345]: [15/Apr/2026:14:11:41] ENGINE Bus STARTING 2026-04-15T14:11:43.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:42 vm04 ceph-mon[53345]: [15/Apr/2026:14:11:41] ENGINE Serving on http://192.168.123.104:8765 2026-04-15T14:11:43.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:42 vm04 ceph-mon[53345]: [15/Apr/2026:14:11:42] ENGINE Serving on https://192.168.123.104:7150 2026-04-15T14:11:43.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:42 vm04 ceph-mon[53345]: [15/Apr/2026:14:11:42] ENGINE Bus STARTED 2026-04-15T14:11:43.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:42 vm04 ceph-mon[53345]: [15/Apr/2026:14:11:42] ENGINE Client ('192.168.123.104', 56282) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-04-15T14:11:43.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:42 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:11:43.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:42 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/2620271772' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "orchestrator"}]': finished 2026-04-15T14:11:43.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:42 vm04 ceph-mon[53345]: mgrmap e8: vm04.ycniad(active, since 2s) 2026-04-15T14:11:43.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:42 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:43.123 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:42 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:11:43.128 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout value unchanged 2026-04-15T14:11:43.128 INFO:teuthology.orchestra.run.vm04.stdout:Generating ssh key... 2026-04-15T14:11:44.132 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:43 vm04 ceph-mon[53345]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:44.133 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:43 vm04 ceph-mon[53345]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:44.133 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:43 vm04 ceph-mon[53345]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:44.133 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:43 vm04 ceph-mon[53345]: Generating ssh key... 2026-04-15T14:11:44.133 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:43 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:44.133 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:43 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:44.295 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDYk0GsxW2oi7DpN75lBSDtu7gXKOKtbSSbR080IWGCCkYEg9LA4+2JijD8k/uA8dRHdvdTLbLeY5Ooc7LPn5hjuJfhr5tdXII9AG9kNBUZhsKfn1M019ZW5/P3S/N271yJSWZjOeeb/4amTNyCrGgy4EXhd0jF8LbI62uwxy+WJzyUhDOLM8LrBzGiPehqAhpjaR9M4mfk0UM8R0rKbTqCII3zoFlkiAYO6ITHNb+AYNfb13wBGE3NVQvmY6FlGRhQX0Dgw12FebZxXp6J6RUCtRrmcF8s0HMIgEekh0g1LaJa0l7aF76A9jlrGAYdNHpsQURzqvwpv+t8S/MDsKT2gSADzaDq/ve8xDShyhEeZM0quf0jEctfbhtojJi5e531vE0CBGiTShgGc2031SQTmjPRKhAkr6/a3X75AGUdcOltuFjJkIWrR6ONnFbgEKPSdKJ4jo5gIxE7Sax72MTqUSjId1uqhtMxWP4ZMrzLBfTlIBDtEMj4LG3tNnzh3pM= ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:11:44.295 INFO:teuthology.orchestra.run.vm04.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-04-15T14:11:44.296 INFO:teuthology.orchestra.run.vm04.stdout:Adding key to root@localhost authorized_keys... 2026-04-15T14:11:44.296 INFO:teuthology.orchestra.run.vm04.stdout:Adding host vm04... 
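Everything from "Generating ssh key..." through "Adding key to root@localhost authorized_keys..." is the orchestrator setting up the ssh identity it uses to reach every host as root. The same steps can be reproduced by hand with the commands the mon audit log records; ssh-copy-id below is an assumed convenience (the suite appends to authorized_keys directly with tee, as shown further down):

    ceph cephadm set-user root             # orchestrator will ssh to hosts as root
    ceph cephadm generate-key              # create the cluster-wide ssh key pair
    ceph cephadm get-pub-key > ceph.pub    # export the public half
    ssh-copy-id -f -i ceph.pub root@vm05   # authorize it on each additional host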
2026-04-15T14:11:45.327 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:44 vm04 ceph-mon[53345]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:46.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:45 vm04 ceph-mon[53345]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm04", "addr": "192.168.123.104", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:46.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:45 vm04 ceph-mon[53345]: Deploying cephadm binary to vm04 2026-04-15T14:11:47.283 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout Added host 'vm04' with addr '192.168.123.104' 2026-04-15T14:11:47.283 INFO:teuthology.orchestra.run.vm04.stdout:Deploying mon service with default placement... 2026-04-15T14:11:47.711 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout Scheduled mon update... 2026-04-15T14:11:47.711 INFO:teuthology.orchestra.run.vm04.stdout:Deploying mgr service with default placement... 2026-04-15T14:11:48.120 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout Scheduled mgr update... 2026-04-15T14:11:48.120 INFO:teuthology.orchestra.run.vm04.stdout:Deploying crash service with default placement... 2026-04-15T14:11:48.311 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:48 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:48.311 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:48 vm04 ceph-mon[53345]: Added host vm04 2026-04-15T14:11:48.311 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:48 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:11:48.311 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:48 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:48.311 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:48 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:48.562 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout Scheduled crash update... 2026-04-15T14:11:48.562 INFO:teuthology.orchestra.run.vm04.stdout:Deploying ceph-exporter service with default placement... 2026-04-15T14:11:48.987 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update... 2026-04-15T14:11:48.988 INFO:teuthology.orchestra.run.vm04.stdout:Deploying prometheus service with default placement... 2026-04-15T14:11:49.399 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout Scheduled prometheus update... 2026-04-15T14:11:49.399 INFO:teuthology.orchestra.run.vm04.stdout:Deploying grafana service with default placement... 
2026-04-15T14:11:49.791 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:49.791 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: Saving service mon spec with placement count:5 2026-04-15T14:11:49.791 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:49.791 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: Saving service mgr spec with placement count:2 2026-04-15T14:11:49.791 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:49.791 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: Saving service crash spec with placement * 2026-04-15T14:11:49.791 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:49.792 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:49.792 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:49.792 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:49.792 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:49.792 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:49 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:49.828 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout Scheduled grafana update... 2026-04-15T14:11:49.828 INFO:teuthology.orchestra.run.vm04.stdout:Deploying node-exporter service with default placement... 2026-04-15T14:11:50.245 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update... 2026-04-15T14:11:50.245 INFO:teuthology.orchestra.run.vm04.stdout:Deploying alertmanager service with default placement... 
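Each "Deploying <name> service with default placement..." line is an "orch apply <service_type>" with no explicit spec; the surrounding mon audit entries show the defaults that get saved (count:5 for mon, count:2 for mgr, '*' for crash, ceph-exporter and node-exporter, count:1 apiece for the monitoring daemons). Spelled out with explicit placements, a rough equivalent would be:

    ceph orch apply mon --placement=5               # saved above as count:5
    ceph orch apply mgr --placement=2               # count:2
    ceph orch apply crash --placement='*'           # one per host
    ceph orch apply node-exporter --placement='*'   # one per host
    ceph orch apply prometheus --placement=1        # single instance; same for
    ceph orch apply grafana --placement=1           # grafana and alertmanager
    ceph orch apply alertmanager --placement=1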
2026-04-15T14:11:50.518 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:50 vm04 ceph-mon[53345]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:50.518 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:50 vm04 ceph-mon[53345]: Saving service ceph-exporter spec with placement * 2026-04-15T14:11:50.518 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:50 vm04 ceph-mon[53345]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:50.518 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:50 vm04 ceph-mon[53345]: Saving service prometheus spec with placement count:1 2026-04-15T14:11:50.518 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:50 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:50.518 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:50 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:50.518 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:50 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:50.673 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update... 2026-04-15T14:11:51.463 INFO:teuthology.orchestra.run.vm04.stdout:Enabling the dashboard module... 2026-04-15T14:11:51.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:51 vm04 ceph-mon[53345]: from='client.14158 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:51.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:51 vm04 ceph-mon[53345]: Saving service grafana spec with placement count:1 2026-04-15T14:11:51.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:51 vm04 ceph-mon[53345]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:51.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:51 vm04 ceph-mon[53345]: Saving service node-exporter spec with placement * 2026-04-15T14:11:51.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:51 vm04 ceph-mon[53345]: from='mgr.14124 192.168.123.104:0/3521370871' entity='mgr.vm04.ycniad' 2026-04-15T14:11:51.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:51 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/4155108233' entity='client.admin' 2026-04-15T14:11:51.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:51 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1974913603' entity='client.admin' 2026-04-15T14:11:52.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:52 vm04 ceph-mon[53345]: from='client.14162 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:11:52.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:52 vm04 ceph-mon[53345]: Saving service alertmanager spec with placement count:1 2026-04-15T14:11:52.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:52 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/745918620' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "dashboard"} : dispatch 2026-04-15T14:11:53.018 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout { 2026-04-15T14:11:53.018 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "epoch": 9, 2026-04-15T14:11:53.018 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "available": true, 2026-04-15T14:11:53.018 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "active_name": "vm04.ycniad", 2026-04-15T14:11:53.018 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-04-15T14:11:53.018 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout } 2026-04-15T14:11:53.018 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for the mgr to restart... 2026-04-15T14:11:53.018 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for mgr epoch 9... 2026-04-15T14:11:53.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:53 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/745918620' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-04-15T14:11:53.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:53 vm04 ceph-mon[53345]: mgrmap e9: vm04.ycniad(active, since 12s) 2026-04-15T14:11:53.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:11:53 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/669353931' entity='client.admin' cmd={"prefix": "mgr stat"} : dispatch 2026-04-15T14:12:03.221 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:02 vm04 ceph-mon[53345]: Active manager daemon vm04.ycniad restarted 2026-04-15T14:12:03.221 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:02 vm04 ceph-mon[53345]: Activating manager daemon vm04.ycniad 2026-04-15T14:12:03.221 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:02 vm04 ceph-mon[53345]: osdmap e3: 0 total, 0 up, 0 in 2026-04-15T14:12:03.221 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:02 vm04 ceph-mon[53345]: mgrmap e10: vm04.ycniad(active, starting, since 0.00563111s) 2026-04-15T14:12:03.221 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:02 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-15T14:12:03.221 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:02 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr metadata", "who": "vm04.ycniad", "id": "vm04.ycniad"} : dispatch 2026-04-15T14:12:03.221 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:02 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "mds metadata"} : dispatch 2026-04-15T14:12:03.221 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:02 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata"} : dispatch 2026-04-15T14:12:03.221 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:02 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata"} : dispatch 2026-04-15T14:12:03.221 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:02 vm04 ceph-mon[53345]: Manager daemon vm04.ycniad is now available 2026-04-15T14:12:03.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout { 2026-04-15T14:12:03.898 
INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11, 2026-04-15T14:12:03.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout "initialized": true 2026-04-15T14:12:03.898 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout } 2026-04-15T14:12:03.898 INFO:teuthology.orchestra.run.vm04.stdout:mgr epoch 9 is available 2026-04-15T14:12:03.898 INFO:teuthology.orchestra.run.vm04.stdout:Using certmgr to generate dashboard self-signed certificate... 2026-04-15T14:12:04.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:03 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:12:04.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:03 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm04.ycniad/mirror_snapshot_schedule"} : dispatch 2026-04-15T14:12:04.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:03 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm04.ycniad/trash_purge_schedule"} : dispatch 2026-04-15T14:12:04.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:03 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:04.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:03 vm04 ceph-mon[53345]: mgrmap e11: vm04.ycniad(active, since 1.00946s) 2026-04-15T14:12:04.730 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout {"cert": "-----BEGIN CERTIFICATE-----\nMIIE/zCCAuegAwIBAgIUZAPJQpgcsV8fLu5jqyG0GgcN7cMwDQYJKoZIhvcNAQEL\nBQAwFzEVMBMGA1UEAwwMY2VwaGFkbS1yb290MB4XDTI2MDQxNTE0MTIwNFoXDTI5\nMDQxNDE0MTIwNFowGjEYMBYGA1UEAwwPMTkyLjE2OC4xMjMuMTA0MIICIjANBgkq\nhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0HVGzcJAFz4ouIISas4+DYKU4d0Ktjxk\ndOMNt+doM5G7Bhqhi/XTxa+GtITUcz59HYDX1RiZS+uLUYozow9q3ARsEM/WjKNu\nBsdk6vx6XR8J6r7IC2XncKIOtHAzy2G5nuRsz9rtCd/7XdqDgADul+xHszKbStuz\nlMgw8UYIz7wp2DHhSHr8EO5HmoU9t4lKytJIhGrsG1gVRQd3CnuSQsTYHYTZNtjb\nQpZsrPVZmHCuAtZkGhv3Pc5bjg3ttVZdGNl/TbIjKT0s2no+lJEozjj8qq2iD590\nvyZhOknaACYuYjZ8ERREV1FLfMubL3S9x5fGi7RE3UETY4u8OiLqhnpBy5jMMik9\nVUmygVqFtghGuqfrEKlt7oAv/sJcJhmQNuKNnRynY0RMaIJGu+5wLP2P/7HrPLQi\nm7dkCaGg6V4lRHy9orpRIZvPjMsQqeBv1Rg3iKIGPay7oV21SSaPd4j4CBmO035h\nNH+7mY0+0IrlVNFWSJkGRyvaWgszo8mluL4AdCCptR9GRumsinMeX2sLkdvddxK2\nB9icGuRtrsWo31DmiUdiEuinPOZSp1G4Fd4t7bgsHGLfPTT7YecNComX5O0Mqqnn\nQUlDAoLqovnXy6VIYdSSYQvvqFKPC+Uz1OaHFa/gFjaFZZePE8/mkzDzW4jx+OQr\nH6U0vWO4s08CAwEAAaNAMD4wLgYDVR0RBCcwJYIKdm0wNC5sb2NhbIIRZGFzaGJv\nYXJkX3NlcnZlcnOHBMCoe2gwDAYDVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOC\nAgEAGQTEYi0rUd59DdKXlqqmC1Z72PMch8yud+u7ySxId+Ej8zNtfO3CSTmiZz4g\n2r9CoW9r4LbDIvRbQNyYLvEwYvK/15l3nb/klabggdrwlorQ5iwO+t8txeOl7U93\nGGFnWLBsvx/9sID/bICN1ZpgbItswSim7tcxCd0NIwFAKBz7hw7nYHPvWgbkV4Xe\nh1oCYjrV10YEGVbYfADNIQmGOBEIS0KFxbzcMQuhQBD9hJTtWUXcM/zPgU6MjUwW\nMdZlDi8fVjN1SwsdHVhqeFzL2VhSaMaaN5dgKrakK2ukr4nCRDZie+8ZghGh59T/\nbjSEp96yKieAdBPSMPf/CJyntY0rW5TvEn2ei6aAG24aNuHm9FjFppDX2DDtZ9eL\nT0rxUXS2ak9bCm78QEACT6GTTGRJjLKnowABjrKyLADl97/mRfXlk9yGao/d9BSw\nVxQ9v28QrBwNAWmi1tTOI1IuS4gIt3AukBron4b/mmXfXOgkCUa7fvqZjj7n2de4\nRdpLTPNTNlIOZvb874+8Dw2y7wVs8n9HGO+D8YB+sZm+yFGI/FvANT7i+6nfZGB3\nmPXZDt+mygvVjdyItoztSybg5UqI8tQKVSQotSWzn/vmUK/855DCX0ID8v1K6uUK\nevkGXF2Xp548f0BGAs53XooAFkXsppCW7QISU9ZcJ1ecp/w=\n-----END 
CERTIFICATE-----\n", "key": "-----BEGIN RSA PRIVATE KEY-----\nMIIJKQIBAAKCAgEA0HVGzcJAFz4ouIISas4+DYKU4d0KtjxkdOMNt+doM5G7Bhqh\ni/XTxa+GtITUcz59HYDX1RiZS+uLUYozow9q3ARsEM/WjKNuBsdk6vx6XR8J6r7I\nC2XncKIOtHAzy2G5nuRsz9rtCd/7XdqDgADul+xHszKbStuzlMgw8UYIz7wp2DHh\nSHr8EO5HmoU9t4lKytJIhGrsG1gVRQd3CnuSQsTYHYTZNtjbQpZsrPVZmHCuAtZk\nGhv3Pc5bjg3ttVZdGNl/TbIjKT0s2no+lJEozjj8qq2iD590vyZhOknaACYuYjZ8\nERREV1FLfMubL3S9x5fGi7RE3UETY4u8OiLqhnpBy5jMMik9VUmygVqFtghGuqfr\nEKlt7oAv/sJcJhmQNuKNnRynY0RMaIJGu+5wLP2P/7HrPLQim7dkCaGg6V4lRHy9\norpRIZvPjMsQqeBv1Rg3iKIGPay7oV21SSaPd4j4CBmO035hNH+7mY0+0IrlVNFW\nSJkGRyvaWgszo8mluL4AdCCptR9GRumsinMeX2sLkdvddxK2B9icGuRtrsWo31Dm\niUdiEuinPOZSp1G4Fd4t7bgsHGLfPTT7YecNComX5O0MqqnnQUlDAoLqovnXy6VI\nYdSSYQvvqFKPC+Uz1OaHFa/gFjaFZZePE8/mkzDzW4jx+OQrH6U0vWO4s08CAwEA\nAQKCAgABQtxhTU4O9fDQIPVUkRfeBA67iRdR/+6JqIACP1OpkDWl9U5J5vxXg1bY\ny77vTY77OTbVEreGto+auC/UyjiWyS4QMfm10x1MzDMHmuxwmMn6EhpquzyZuVlG\nbTFbByXU2MPxw/zQJPtD15kZEqhe4e6THnjDDK469bUaGJbgDt7f8HlQ83GH9rEX\nNbwoWpvJu1j1+CGijrdeOgvV0+aQIdm+3/lidBBriDZzG27l1bSvzrU/VTD7Pj7e\njqYEBVg92L8E7NF6o+ScE8ZCJtis2rpom/FRj+DocSaAcEbx3klEjdjVT56UOqku\nwZqYQGgPh5MLcFVEU2r3HLbQUTrC/7cutb8oI32hEmKeEMix4m2RZbYs56EtveSV\nkpB0+qRZJkbFfThN9dhWaIypEIdl2iUd+FIA2NzNj1cNX56wt0IPNeaVkuRO3+mH\nc7atet0qAb1hrNM5NM2JITaV+hnAbi/CZG7Ki45aq6NO93vsi51qvuHtukhM2MaK\nmOS9gOisv40wB2vtjSj17+3lZ8Wi+7M4jPRWnnyUc7lVguqGi8j5aXTC8ra1gh6e\nPqmndvSKtJnYR5Ne9lAUPyS3CUk/FdggkAhyig2BHkiFQXk+zXs4FWNeiN/Yit3E\nSkfeRF5NwTTmkSK1NQAs7cj8DBAbztNegkUC19gAabjkmXisIQKCAQEA+4DP72D1\nSxMxMLjUK5fSsscsNGCh00aWLBbN+ed3hkK23JExQVBv8oHXleO0FMKJso6HffEf\n6BIVi06g63ccFlXJ9Sahx6T4R+nEjRL9O9VfsYhpQBWWVwYBYVY3pjddDatCoCpK\nXKGXczOFWC+NzMdkbJRqFJBSjY9WE+okzNMMRBkb7jKbEOO2lHU+RHpP1snYly07\nsQdQLp0FFO0J9O8P0VJTDICEJbTIstySA8fDb86qW+Jj5367awfBkXG6VJ2gJ3gC\nM1HNT4pp6+IixIj7PgNF1yE0LKyfQBm9bWfFmBrSyU82pa1oSz7mWgPmgnJeysj5\nqrcU+xxTN6Ob4QKCAQEA1C9v7KcIzDyAaocbAeNy6WH4KEaQiUR7eF35e+1j8GND\nkxyIRVv/0CVwZBxCKMtqIBcGSUz4Z5G0PmmHRM+EWwCReddDJHa5KlC5ju+rAIPx\nwC25jzN1R6v71IZUh0x2NCfrHuIg9UOApT3C/UgPEMjNqHAfl31MDhAFTJ4kgBjy\ntLL+tETCMOtg8tywy4GtORagTC9gnpFhRg8TEQMW5Qz43WixPgIFBqidV1wszM3/\nsiIA6PE+A0JyJk3mtaA9GgvC5JC8zRKZPMdruJs1KqP/d3EQNgN39Au7PZ1OULCK\nS71vv/jsqr7ayALO15nMrwkA2ZZcnU5toIkyFpm1LwKCAQEAwCW7t3FmNoNlM4oi\nX2PRD9FnjmS9fgMRvIF3wJP9F1BlEMLE1Ne1j5tvsA6ViL7QIOEP10KfU9htKU9b\nVkpb5RyJ/YgUVclN/vM0vWtimZH9WE+VyeY0RCxMZadPQOhPaaAMeP2WborugI8+\nMw+x0GAN8364RRm2WHzIENL0ZqBfYK+mTSfQnOi8l1OzzPF9ieYQWf+u+SNTOSmx\ntjkbQExc01mIJacSTxR9uVkXHVhDkmpccXLXTQff/HeihZJT/02mDEF9vOOkbj6b\niiHs2ialfM1XUHMHv5Hq4ThJJwr/RaRIopCoeY+wfLyDlQYh2Eu07Ez7olfZ6bfM\nRFf1QQKCAQEAvHkKfPX9vZR8KKOrnKjQ+Hb6O6Q6XIOE/OzUX30EyZEShAjTDnDi\nsJug0nu7VIsi0DBDeDazayOkK9AGxFtj4IWHsuc/j5MZdHnR6SIADewlf6Pd8FIv\n+aLNhQBw8uE7GabflUYdKl50VdKfceVg5HLJIrW9ig1U0iTu4BiUqWf/pfxce/mb\nWGKrce4P6CRenpf3eNSYiSDve5CGShUpb/lu0RReY4rdtgVuBRwt+xl751mG32Lx\nx2ltBZMqu+gCfjE2Q1pBKhM8JB2rFIZg2mhj0k+Oq7u3AAKvpdIlr0yw+wfgzG+q\noRv4L7M20ZGem1du9tc1mtEAVZmn8SVFJwKCAQA/99vswC8KIg9VI4tsdYMHQ8tY\nac30Yfh6Yh3GbpNb5aRrtumcYfDosEExx0BOB5q69sd0ffMJYWg1HeO5dSv2UA0i\n7BSQNV33uc0Uu1bp54wFvj1kUR+0KgIXMl1z35OtdlTE1ItTkt8e7NUsED4JZfJQ\n3FbvRUZE8lXAkkwV+e2vSlpEGoS4+Jq/OsM6M3hY6Q+rMrKDWhTEid8V+f+qFyiT\n5qdXejpFv0zi96jQsS5NzGH4Q/qqey0Sa3Z8J5zbYABrvxTTEu0x3cvesroombwA\n/ZZha/p0XstvFWmWLMT2zNvBSCS9F8B20WSFEUPfVD8wwwixYdXbFigaKYJN\n-----END RSA PRIVATE KEY-----\n"} 2026-04-15T14:12:05.144 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout SSL certificate updated 2026-04-15T14:12:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:05 vm04 ceph-mon[53345]: 
[15/Apr/2026:14:12:03] ENGINE Bus STARTING 2026-04-15T14:12:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:05 vm04 ceph-mon[53345]: [15/Apr/2026:14:12:03] ENGINE Serving on http://192.168.123.104:8765 2026-04-15T14:12:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:05 vm04 ceph-mon[53345]: [15/Apr/2026:14:12:03] ENGINE Serving on https://192.168.123.104:7150 2026-04-15T14:12:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:05 vm04 ceph-mon[53345]: [15/Apr/2026:14:12:03] ENGINE Bus STARTED 2026-04-15T14:12:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:05 vm04 ceph-mon[53345]: [15/Apr/2026:14:12:03] ENGINE Client ('192.168.123.104', 38796) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-04-15T14:12:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:05 vm04 ceph-mon[53345]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-04-15T14:12:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:05 vm04 ceph-mon[53345]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-04-15T14:12:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:05 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:05 vm04 ceph-mon[53345]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch certmgr generate-certificates", "module_name": "dashboard", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:12:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:05 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:05.551 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout SSL certificate key updated 2026-04-15T14:12:05.551 INFO:teuthology.orchestra.run.vm04.stdout:Creating initial admin user... 2026-04-15T14:12:06.092 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$NNAk8UKKmiABG3EEgM/2Su4Wm.2D3KV2Jw14Cdt68jT3ICGbpHhBq", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1776262326, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-04-15T14:12:06.092 INFO:teuthology.orchestra.run.vm04.stdout:Fetching dashboard port number... 
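The dashboard bring-up reduces to installing the certmgr-generated certificate and key and creating the first account, with the password read from a file via -i rather than passed on the command line. Roughly (the password is the throwaway one the bootstrap prints just below; the file paths are illustrative):

    ceph dashboard set-ssl-certificate -i dashboard.crt     # cert blob shown above
    ceph dashboard set-ssl-certificate-key -i dashboard.key
    echo -n 'i198mb85g9' > /tmp/dash_pw
    ceph dashboard ac-user-create admin -i /tmp/dash_pw administrator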
2026-04-15T14:12:06.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:06 vm04 ceph-mon[53345]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "dashboard set-ssl-certificate", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:12:06.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:06 vm04 ceph-mon[53345]: mgrmap e12: vm04.ycniad(active, since 2s) 2026-04-15T14:12:06.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:06 vm04 ceph-mon[53345]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "dashboard set-ssl-certificate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:12:06.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:06 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:06.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:06 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:06.462 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stdout 8443 2026-04-15T14:12:06.462 INFO:teuthology.orchestra.run.vm04.stdout:firewalld does not appear to be present 2026-04-15T14:12:06.462 INFO:teuthology.orchestra.run.vm04.stdout:Not possible to open ports <[8443]>. firewalld.service is not available 2026-04-15T14:12:06.464 INFO:teuthology.orchestra.run.vm04.stdout:Ceph Dashboard is now available at: 2026-04-15T14:12:06.464 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:06.464 INFO:teuthology.orchestra.run.vm04.stdout: URL: https://vm04.local:8443/ 2026-04-15T14:12:06.464 INFO:teuthology.orchestra.run.vm04.stdout: User: admin 2026-04-15T14:12:06.464 INFO:teuthology.orchestra.run.vm04.stdout: Password: i198mb85g9 2026-04-15T14:12:06.464 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:06.464 INFO:teuthology.orchestra.run.vm04.stdout:Saving cluster configuration to /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config directory 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout:Or, if you are only running a single cluster on this host: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: sudo /home/ubuntu/cephtest/cephadm shell 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout:Please consider enabling telemetry to help improve Ceph: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: ceph telemetry on 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout:For more information see: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: 
https://docs.ceph.com/en/latest/mgr/telemetry/ 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:06.885 INFO:teuthology.orchestra.run.vm04.stdout:Bootstrap complete. 2026-04-15T14:12:06.895 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stdout static 2026-04-15T14:12:06.902 INFO:teuthology.orchestra.run.vm04.stdout:Non-zero exit code 3 from systemctl is-active logrotate 2026-04-15T14:12:06.902 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stdout inactive 2026-04-15T14:12:06.902 INFO:teuthology.orchestra.run.vm04.stdout:Enabling the logrotate.timer service to perform daily log rotation. 2026-04-15T14:12:07.067 INFO:tasks.cephadm:Fetching config... 2026-04-15T14:12:07.067 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-15T14:12:07.067 DEBUG:teuthology.orchestra.run.vm04:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-04-15T14:12:07.083 INFO:tasks.cephadm:Fetching client.admin keyring... 2026-04-15T14:12:07.083 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-15T14:12:07.083 DEBUG:teuthology.orchestra.run.vm04:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-04-15T14:12:07.142 INFO:tasks.cephadm:Fetching mon keyring... 2026-04-15T14:12:07.142 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-15T14:12:07.143 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/keyring of=/dev/stdout 2026-04-15T14:12:07.208 INFO:tasks.cephadm:Fetching pub ssh key... 2026-04-15T14:12:07.208 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-15T14:12:07.208 DEBUG:teuthology.orchestra.run.vm04:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-04-15T14:12:07.266 INFO:tasks.cephadm:Installing pub ssh key for root users... 2026-04-15T14:12:07.266 DEBUG:teuthology.orchestra.run.vm04:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDYk0GsxW2oi7DpN75lBSDtu7gXKOKtbSSbR080IWGCCkYEg9LA4+2JijD8k/uA8dRHdvdTLbLeY5Ooc7LPn5hjuJfhr5tdXII9AG9kNBUZhsKfn1M019ZW5/P3S/N271yJSWZjOeeb/4amTNyCrGgy4EXhd0jF8LbI62uwxy+WJzyUhDOLM8LrBzGiPehqAhpjaR9M4mfk0UM8R0rKbTqCII3zoFlkiAYO6ITHNb+AYNfb13wBGE3NVQvmY6FlGRhQX0Dgw12FebZxXp6J6RUCtRrmcF8s0HMIgEekh0g1LaJa0l7aF76A9jlrGAYdNHpsQURzqvwpv+t8S/MDsKT2gSADzaDq/ve8xDShyhEeZM0quf0jEctfbhtojJi5e531vE0CBGiTShgGc2031SQTmjPRKhAkr6/a3X75AGUdcOltuFjJkIWrR6ONnFbgEKPSdKJ4jo5gIxE7Sax72MTqUSjId1uqhtMxWP4ZMrzLBfTlIBDtEMj4LG3tNnzh3pM= ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-04-15T14:12:07.347 INFO:teuthology.orchestra.run.vm04.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDYk0GsxW2oi7DpN75lBSDtu7gXKOKtbSSbR080IWGCCkYEg9LA4+2JijD8k/uA8dRHdvdTLbLeY5Ooc7LPn5hjuJfhr5tdXII9AG9kNBUZhsKfn1M019ZW5/P3S/N271yJSWZjOeeb/4amTNyCrGgy4EXhd0jF8LbI62uwxy+WJzyUhDOLM8LrBzGiPehqAhpjaR9M4mfk0UM8R0rKbTqCII3zoFlkiAYO6ITHNb+AYNfb13wBGE3NVQvmY6FlGRhQX0Dgw12FebZxXp6J6RUCtRrmcF8s0HMIgEekh0g1LaJa0l7aF76A9jlrGAYdNHpsQURzqvwpv+t8S/MDsKT2gSADzaDq/ve8xDShyhEeZM0quf0jEctfbhtojJi5e531vE0CBGiTShgGc2031SQTmjPRKhAkr6/a3X75AGUdcOltuFjJkIWrR6ONnFbgEKPSdKJ4jo5gIxE7Sax72MTqUSjId1uqhtMxWP4ZMrzLBfTlIBDtEMj4LG3tNnzh3pM= ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:12:07.361 DEBUG:teuthology.orchestra.run.vm05:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDYk0GsxW2oi7DpN75lBSDtu7gXKOKtbSSbR080IWGCCkYEg9LA4+2JijD8k/uA8dRHdvdTLbLeY5Ooc7LPn5hjuJfhr5tdXII9AG9kNBUZhsKfn1M019ZW5/P3S/N271yJSWZjOeeb/4amTNyCrGgy4EXhd0jF8LbI62uwxy+WJzyUhDOLM8LrBzGiPehqAhpjaR9M4mfk0UM8R0rKbTqCII3zoFlkiAYO6ITHNb+AYNfb13wBGE3NVQvmY6FlGRhQX0Dgw12FebZxXp6J6RUCtRrmcF8s0HMIgEekh0g1LaJa0l7aF76A9jlrGAYdNHpsQURzqvwpv+t8S/MDsKT2gSADzaDq/ve8xDShyhEeZM0quf0jEctfbhtojJi5e531vE0CBGiTShgGc2031SQTmjPRKhAkr6/a3X75AGUdcOltuFjJkIWrR6ONnFbgEKPSdKJ4jo5gIxE7Sax72MTqUSjId1uqhtMxWP4ZMrzLBfTlIBDtEMj4LG3tNnzh3pM= ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-04-15T14:12:07.397 INFO:teuthology.orchestra.run.vm05.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDYk0GsxW2oi7DpN75lBSDtu7gXKOKtbSSbR080IWGCCkYEg9LA4+2JijD8k/uA8dRHdvdTLbLeY5Ooc7LPn5hjuJfhr5tdXII9AG9kNBUZhsKfn1M019ZW5/P3S/N271yJSWZjOeeb/4amTNyCrGgy4EXhd0jF8LbI62uwxy+WJzyUhDOLM8LrBzGiPehqAhpjaR9M4mfk0UM8R0rKbTqCII3zoFlkiAYO6ITHNb+AYNfb13wBGE3NVQvmY6FlGRhQX0Dgw12FebZxXp6J6RUCtRrmcF8s0HMIgEekh0g1LaJa0l7aF76A9jlrGAYdNHpsQURzqvwpv+t8S/MDsKT2gSADzaDq/ve8xDShyhEeZM0quf0jEctfbhtojJi5e531vE0CBGiTShgGc2031SQTmjPRKhAkr6/a3X75AGUdcOltuFjJkIWrR6ONnFbgEKPSdKJ4jo5gIxE7Sax72MTqUSjId1uqhtMxWP4ZMrzLBfTlIBDtEMj4LG3tNnzh3pM= ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:12:07.408 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-04-15T14:12:07.434 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:07 vm04 ceph-mon[53345]: from='client.14188 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:12:07.434 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:07 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3706813759' entity='client.admin' cmd={"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"} : dispatch 2026-04-15T14:12:07.434 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:07 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/3272147384' entity='client.admin' 2026-04-15T14:12:07.544 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:12:07.978 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-04-15T14:12:07.978 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-04-15T14:12:08.167 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:12:08.643 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm05 2026-04-15T14:12:08.644 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-04-15T14:12:08.644 DEBUG:teuthology.orchestra.run.vm05:> dd of=/etc/ceph/ceph.conf 2026-04-15T14:12:08.662 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-04-15T14:12:08.662 DEBUG:teuthology.orchestra.run.vm05:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-04-15T14:12:08.718 INFO:tasks.cephadm:Adding host vm05 to orchestrator... 2026-04-15T14:12:08.718 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch host add vm05 2026-04-15T14:12:08.850 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/3388092786' entity='client.admin' 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm04", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm04", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: Deploying daemon ceph-exporter.vm04 on vm04 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: from='client.14196 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:12:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:08 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:10.578 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:10 vm04 ceph-mon[53345]: from='client.14198 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm05", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:12:10.578 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:10 vm04 ceph-mon[53345]: mgrmap e13: vm04.ycniad(active, since 6s) 2026-04-15T14:12:10.578 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:10 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:10.578 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:10 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:10.578 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:10 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:10.578 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:10 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:10.578 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:10 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm04", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-15T14:12:10.578 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:10 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm04", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-04-15T14:12:10.578 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:10 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:12:11.690 INFO:teuthology.orchestra.run.vm04.stdout:Added host 'vm05' with addr '192.168.123.105' 2026-04-15T14:12:11.754 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:11 vm04 ceph-mon[53345]: Deploying cephadm binary to vm05 2026-04-15T14:12:11.754 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:11 vm04 ceph-mon[53345]: Deploying daemon crash.vm04 on vm04 2026-04-15T14:12:11.754 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:11 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:11.754 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:11 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:11.754 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:11 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:11.754 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:11 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:11.793 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch host ls --format=json 2026-04-15T14:12:11.933 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:12:12.307 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:12:12.307 INFO:teuthology.orchestra.run.vm04.stdout:[{"addr": "192.168.123.104", "hostname": "vm04", "labels": [], "status": ""}, {"addr": "192.168.123.105", "hostname": "vm05", "labels": [], "status": ""}] 2026-04-15T14:12:12.360 INFO:tasks.cephadm:Setting crush tunables to default 2026-04-15T14:12:12.360 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd crush tunables default 2026-04-15T14:12:12.496 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:12:12.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:12 vm04 ceph-mon[53345]: Deploying daemon node-exporter.vm04 on vm04 2026-04-15T14:12:12.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:12 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' 
entity='mgr.vm04.ycniad' 2026-04-15T14:12:13.581 INFO:teuthology.orchestra.run.vm04.stderr:adjusted tunables profile to default 2026-04-15T14:12:13.671 INFO:tasks.cephadm:Adding mon.vm04 on vm04 2026-04-15T14:12:13.671 INFO:tasks.cephadm:Adding mon.vm05 on vm05 2026-04-15T14:12:13.671 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch apply mon '2;vm04:192.168.123.104=vm04;vm05:192.168.123.105=vm05' 2026-04-15T14:12:13.843 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T14:12:13.843 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T14:12:13.860 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:13 vm04 ceph-mon[53345]: Added host vm05 2026-04-15T14:12:13.860 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:13 vm04 ceph-mon[53345]: from='client.14201 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:12:13.860 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:13 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/2184608325' entity='client.admin' cmd={"prefix": "osd crush tunables", "profile": "default"} : dispatch 2026-04-15T14:12:13.860 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:13 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:14.230 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled mon update... 2026-04-15T14:12:14.295 DEBUG:teuthology.orchestra.run.vm05:mon.vm05> sudo journalctl -f -n 0 -u ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm05.service 2026-04-15T14:12:14.297 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-15T14:12:14.297 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph mon dump -f json 2026-04-15T14:12:14.471 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T14:12:14.472 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T14:12:14.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:14 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/2184608325' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-04-15T14:12:14.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:14 vm04 ceph-mon[53345]: osdmap e4: 0 total, 0 up, 0 in 2026-04-15T14:12:14.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:14 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:14.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:14 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:14.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:14 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:14.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:14 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:14.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:14 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' 2026-04-15T14:12:14.975 INFO:teuthology.orchestra.run.vm05.stdout: 2026-04-15T14:12:14.976 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":1,"fsid":"d89dc7c6-38d4-11f1-aa58-cd98464f39ae","modified":"2026-04-15T14:11:11.742276Z","created":"2026-04-15T14:11:11.742276Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm04","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3300","nonce":0},{"type":"v1","addr":"192.168.123.104:6789","nonce":0}]},"addr":"192.168.123.104:6789/0","public_addr":"192.168.123.104:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T14:12:14.976 INFO:teuthology.orchestra.run.vm05.stderr:dumped monmap epoch 1 2026-04-15T14:12:15.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:15 vm04 ceph-mon[53345]: Deploying daemon alertmanager.vm04 on vm04 2026-04-15T14:12:15.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:15 vm04 ceph-mon[53345]: from='client.14205 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm04:192.168.123.104=vm04;vm05:192.168.123.105=vm05", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:12:15.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:15 vm04 ceph-mon[53345]: Saving service mon spec with placement vm04:192.168.123.104=vm04;vm05:192.168.123.105=vm05;count:2 2026-04-15T14:12:15.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:15 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3031120655' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T14:12:16.035 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
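Two setup steps land here: teuthology resets the crush tunables, then replaces the default count:5 mon spec with an explicit two-host placement. The placement string grammar is '<count>;<host>:<addr>=<name>;...', so the second command pins mon.vm04 and mon.vm05 to their addresses, exactly as the audit entry records:

    ceph osd crush tunables default
    ceph orch apply mon '2;vm04:192.168.123.104=vm04;vm05:192.168.123.105=vm05'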
2026-04-15T14:12:16.035 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph mon dump -f json 2026-04-15T14:12:16.188 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T14:12:16.188 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T14:12:16.615 INFO:teuthology.orchestra.run.vm05.stdout: 2026-04-15T14:12:16.615 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":1,"fsid":"d89dc7c6-38d4-11f1-aa58-cd98464f39ae","modified":"2026-04-15T14:11:11.742276Z","created":"2026-04-15T14:11:11.742276Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm04","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3300","nonce":0},{"type":"v1","addr":"192.168.123.104:6789","nonce":0}]},"addr":"192.168.123.104:6789/0","public_addr":"192.168.123.104:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T14:12:16.615 INFO:teuthology.orchestra.run.vm05.stderr:dumped monmap epoch 1 2026-04-15T14:12:16.982 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:16 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/825400209' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T14:12:17.672 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-04-15T14:12:18.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:18 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad'
2026-04-15T14:12:18.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:18 vm04 ceph-mon[53345]: Generating cephadm-signed certificates for grafana_cert/grafana_key
2026-04-15T14:12:18.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:18 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch
2026-04-15T14:12:18.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:18 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3193629483' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:19.321 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:19.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:19 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-04-15T14:12:19.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:19 vm04 ceph-mon[53345]: Deploying daemon grafana.vm04 on vm04
2026-04-15T14:12:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:20 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/76678380' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:20.930 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:21.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:21 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3910042884' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:22.550 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:23 vm04 ceph-mon[53345]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T14:12:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:23 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/1372130271' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:24.176 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:25.315 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:25 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/1812628347' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:25.827 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:26.177 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:26 vm04 ceph-mon[53345]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T14:12:26.177 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:26 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad'
2026-04-15T14:12:27.204 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:27 vm04 ceph-mon[53345]: Deploying daemon prometheus.vm04 on vm04
2026-04-15T14:12:27.204 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:27 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/373336379' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:27.516 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
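While the monmap poll loops, the mon log shows the orchestrator deploying the monitoring stack (grafana, prometheus). A sketch of watching such a deployment land through the same cephadm shell, in the spirit of the `ceph orch ps` loops this suite runs elsewhere; the polling helper is illustrative, not part of the test.

import subprocess
import time

FSID = "d89dc7c6-38d4-11f1-aa58-cd98464f39ae"
IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5"

def orch_ps(daemon_type: str) -> str:
    # Same invocation shape as the DEBUG lines in this log, wrapping a
    # different command: list orchestrator-managed daemons of one type.
    return subprocess.check_output([
        "sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE, "shell",
        "--fsid", FSID, "--", "ceph", "orch", "ps", "--daemon-type", daemon_type,
    ]).decode()

# Poll until the prometheus daemon the mon just announced shows up as running.
while "running" not in orch_ps("prometheus"):
    time.sleep(5)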
2026-04-15T14:12:28.268 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:28 vm04 ceph-mon[53345]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T14:12:29.125 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:29 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/4090045829' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:29 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad'
2026-04-15T14:12:30.724 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:30.798 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:30 vm04 ceph-mon[53345]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T14:12:30.798 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:30 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3117605154' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:31.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:31 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/1580440837' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:32.346 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:32.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:32 vm04 ceph-mon[53345]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T14:12:34.013 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:34.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:33 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad'
2026-04-15T14:12:34.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:33 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr module enable", "module": "prometheus"} : dispatch
2026-04-15T14:12:34.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:33 vm04 ceph-mon[53345]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T14:12:34.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:33 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/1714227711' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:35.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:34 vm04 ceph-mon[53345]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished
2026-04-15T14:12:35.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:34 vm04 ceph-mon[53345]: mgrmap e14: vm04.ycniad(active, since 30s)
2026-04-15T14:12:35.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:34 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/4278484000' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:35.737 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:36.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:36 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3971583649' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:37.361 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:38.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:37 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/4106566592' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:38.993 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:39.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:39 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/207370587' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:40.620 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:41.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:41 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/617229888' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:42.260 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:43.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:42 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/1694948806' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:43.900 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:44.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:44 vm04 ceph-mon[53345]: Active manager daemon vm04.ycniad restarted
2026-04-15T14:12:44.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:44 vm04 ceph-mon[53345]: Activating manager daemon vm04.ycniad
2026-04-15T14:12:44.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:44 vm04 ceph-mon[53345]: osdmap e5: 0 total, 0 up, 0 in
2026-04-15T14:12:44.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:44 vm04 ceph-mon[53345]: mgrmap e15: vm04.ycniad(active, starting, since 0.00555217s)
2026-04-15T14:12:44.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:44 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-15T14:12:44.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:44 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr metadata", "who": "vm04.ycniad", "id": "vm04.ycniad"} : dispatch
2026-04-15T14:12:44.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:44 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mds metadata"} : dispatch
2026-04-15T14:12:44.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:44 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata"} : dispatch
2026-04-15T14:12:44.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:44 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata"} : dispatch
2026-04-15T14:12:44.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:44 vm04 ceph-mon[53345]: Manager daemon vm04.ycniad is now available
2026-04-15T14:12:45.271 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:45 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:12:45.271 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:45 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:45.271 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:45 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-15T14:12:45.271 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:45 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:12:45.271 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:45 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm04.ycniad/mirror_snapshot_schedule"} : dispatch
2026-04-15T14:12:45.271 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:45 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm04.ycniad/trash_purge_schedule"} : dispatch
2026-04-15T14:12:45.271 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:45 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/2303848615' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:45.271 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:45 vm04 ceph-mon[53345]: mgrmap e16: vm04.ycniad(active, since 1.0103s)
2026-04-15T14:12:45.534 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:47.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:46 vm04 ceph-mon[53345]: [15/Apr/2026:14:12:45] ENGINE Bus STARTING
2026-04-15T14:12:47.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:46 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-15T14:12:47.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:46 vm04 ceph-mon[53345]: [15/Apr/2026:14:12:45] ENGINE Serving on http://192.168.123.104:8765
2026-04-15T14:12:47.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:46 vm04 ceph-mon[53345]: [15/Apr/2026:14:12:45] ENGINE Serving on https://192.168.123.104:7150
2026-04-15T14:12:47.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:46 vm04 ceph-mon[53345]: [15/Apr/2026:14:12:45] ENGINE Bus STARTED
2026-04-15T14:12:47.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:46 vm04 ceph-mon[53345]: [15/Apr/2026:14:12:45] ENGINE Client ('192.168.123.104', 53276) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-15T14:12:47.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:46 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:47.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:46 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3250949836' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:47.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:46 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch
2026-04-15T14:12:47.189 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: mgrmap e17: vm04.ycniad(active, since 2s)
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"} : dispatch
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: Updating vm04:/etc/ceph/ceph.conf
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: Updating vm05:/etc/ceph/ceph.conf
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: Updating vm05:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.conf
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: Updating vm04:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.conf
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: Updating vm05:/etc/ceph/ceph.client.admin.keyring
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: Updating vm04:/etc/ceph/ceph.client.admin.keyring
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: Updating vm05:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.client.admin.keyring
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: Updating vm04:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.client.admin.keyring
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm05", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm05", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
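The "auth get-or-create" dispatches above pair each cap target with its spec in one flat list (here "mon" appears twice, once for the profile and once for "allow r"). A minimal sketch of issuing the same command by hand through the cephadm shell; the helper and its layout are illustrative, taken from the command shape in this log.

import subprocess

FSID = "d89dc7c6-38d4-11f1-aa58-cd98464f39ae"
IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5"

# (target, capspec) pairs, flattened onto the CLI exactly as the mon logged them.
caps = [
    ("mon", "profile ceph-exporter"),
    ("mon", "allow r"),
    ("mgr", "allow r"),
    ("osd", "allow r"),
]
cmd = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE, "shell",
       "--fsid", FSID, "--", "ceph", "auth", "get-or-create",
       "client.ceph-exporter.vm05"]
for target, spec in caps:
    cmd += [target, spec]
print(subprocess.check_output(cmd).decode())  # prints the resulting keyring entry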
2026-04-15T14:12:48.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:47 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3022857473' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:48.895 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:49.116 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.conf
2026-04-15T14:12:49.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:48 vm04 ceph-mon[53345]: Deploying daemon ceph-exporter.vm05 on vm05
2026-04-15T14:12:50.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:50 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:50.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:50 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm05", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-15T14:12:50.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:50 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm05", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-04-15T14:12:50.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:50 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:12:50.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:50 vm04 ceph-mon[53345]: Deploying daemon crash.vm05 on vm05
2026-04-15T14:12:50.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:50 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3862752732' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:50.649 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:51.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:51 vm04 ceph-mon[53345]: Deploying daemon node-exporter.vm05 on vm05
2026-04-15T14:12:51.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:51 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/1046433175' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:52.234 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:52.235 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph mon dump -f json
2026-04-15T14:12:52.408 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.conf
2026-04-15T14:12:52.872 INFO:teuthology.orchestra.run.vm05.stdout:
2026-04-15T14:12:52.872 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":1,"fsid":"d89dc7c6-38d4-11f1-aa58-cd98464f39ae","modified":"2026-04-15T14:11:11.742276Z","created":"2026-04-15T14:11:11.742276Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm04","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3300","nonce":0},{"type":"v1","addr":"192.168.123.104:6789","nonce":0}]},"addr":"192.168.123.104:6789/0","public_addr":"192.168.123.104:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-15T14:12:52.872 INFO:teuthology.orchestra.run.vm05.stderr:dumped monmap epoch 1
2026-04-15T14:12:53.931 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T14:12:53.932 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph mon dump -f json
2026-04-15T14:12:54.111 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm05/config
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm05.ozgwuj", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm05.ozgwuj", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr services"} : dispatch
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: Deploying daemon mgr.vm05.ozgwuj on vm05
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/1563662373' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "mon."} : dispatch
2026-04-15T14:12:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:53 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:12:54.408 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 systemd[1]: Starting Ceph mon.vm05 for d89dc7c6-38d4-11f1-aa58-cd98464f39ae...
2026-04-15T14:12:54.601 INFO:teuthology.orchestra.run.vm05.stdout:
2026-04-15T14:12:54.601 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":1,"fsid":"d89dc7c6-38d4-11f1-aa58-cd98464f39ae","modified":"2026-04-15T14:11:11.742276Z","created":"2026-04-15T14:11:11.742276Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm04","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3300","nonce":0},{"type":"v1","addr":"192.168.123.104:6789","nonce":0}]},"addr":"192.168.123.104:6789/0","public_addr":"192.168.123.104:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-15T14:12:54.601 INFO:teuthology.orchestra.run.vm05.stderr:dumped monmap epoch 1
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 podman[57815]: 2026-04-15 14:12:54.526257799 +0000 UTC m=+0.025812001 container create c33af830112a7b1a52d5917062a40a0cec23bf74426f2cf7e036e494d14bf03b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm05, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-19-g7ec4401a095, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86, FROM_IMAGE=rockylinux:9)
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 podman[57815]: 2026-04-15 14:12:54.568934373 +0000 UTC m=+0.068488585 container init c33af830112a7b1a52d5917062a40a0cec23bf74426f2cf7e036e494d14bf03b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm05, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-19-g7ec4401a095, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True)
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 podman[57815]: 2026-04-15 14:12:54.572679603 +0000 UTC m=+0.072233815 container start c33af830112a7b1a52d5917062a40a0cec23bf74426f2cf7e036e494d14bf03b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm05, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-19-g7ec4401a095, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 bash[57815]: c33af830112a7b1a52d5917062a40a0cec23bf74426f2cf7e036e494d14bf03b
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 podman[57815]: 2026-04-15 14:12:54.51036344 +0000 UTC m=+0.009917662 image pull 259950fb12cb763f6889e1e4c320167a5351669158cfdd94a1086a8bb5694c2e harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 systemd[1]: Started Ceph mon.vm05 for d89dc7c6-38d4-11f1-aa58-cd98464f39ae.
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: set uid:gid to 167:167 (ceph:ceph)
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable - RelWithDebInfo), process ceph-mon, pid 2
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: pidfile_write: ignore empty --pid-file
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: load: jerasure load: lrc
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: RocksDB version: 7.9.2
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Git sha 0
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Compile date 2026-04-14 11:30:02
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: DB SUMMARY
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: DB Session ID: SR3NS5Z17L4FI8JKWDO3
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: CURRENT file: CURRENT
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: IDENTITY file: IDENTITY
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: MANIFEST file: MANIFEST-000005 size: 59 Bytes
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm05/store.db dir, Total Num: 0, files:
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm05/store.db: 000004.log size: 511 ;
2026-04-15T14:12:54.936 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.error_if_exists: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.create_if_missing: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.paranoid_checks: 1
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.flush_verify_memtable_count: 1
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.env: 0x5608a680b440
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.fs: PosixFileSystem
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.info_log: 0x5608a7b2df00
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_file_opening_threads: 16
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.statistics: (nil)
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.use_fsync: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_log_file_size: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_manifest_file_size: 1073741824
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.log_file_time_to_roll: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.keep_log_file_num: 1000
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.recycle_log_file_num: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.allow_fallocate: 1
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.allow_mmap_reads: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.allow_mmap_writes: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.use_direct_reads: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.create_missing_column_families: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.db_log_dir:
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.wal_dir:
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.table_cache_numshardbits: 6
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.WAL_ttl_seconds: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.WAL_size_limit_MB: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.manifest_preallocation_size: 4194304
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.is_fd_close_on_exec: 1
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.advise_random_on_open: 1
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.db_write_buffer_size: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.write_buffer_manager: 0x5608a7b30500
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.access_hint_on_compaction_start: 1
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.random_access_max_buffer_size: 1048576
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.use_adaptive_mutex: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.rate_limiter: (nil)
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.wal_recovery_mode: 2
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.enable_thread_tracking: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.enable_pipelined_write: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.unordered_write: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.allow_concurrent_memtable_write: 1
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.write_thread_max_yield_usec: 100
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.write_thread_slow_yield_usec: 3
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.row_cache: None
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.wal_filter: None
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.avoid_flush_during_recovery: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.allow_ingest_behind: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.two_write_queues: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.manual_wal_flush: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.wal_compression: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.atomic_flush: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.persist_stats_to_disk: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.write_dbid_to_manifest: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.log_readahead_size: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.file_checksum_gen_factory: Unknown
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.best_efforts_recovery: 0
2026-04-15T14:12:54.937 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.allow_data_in_errors: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.db_host_id: __hostname__
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.enforce_single_del_contracts: true
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_background_jobs: 2
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_background_compactions: -1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_subcompactions: 1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.avoid_flush_during_shutdown: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.delayed_write_rate : 16777216
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_total_wal_size: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.stats_dump_period_sec: 600
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.stats_persist_period_sec: 600
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.stats_history_buffer_size: 1048576
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_open_files: -1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bytes_per_sync: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.wal_bytes_per_sync: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.strict_bytes_per_sync: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_readahead_size: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_background_flushes: -1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Compression algorithms supported:
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: kZSTD supported: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: kXpressCompression supported: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: kBZip2Compression supported: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: kZSTDNotFinalCompression supported: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: kLZ4Compression supported: 1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: kZlibCompression supported: 1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: kLZ4HCCompression supported: 1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: kSnappyCompression supported: 1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Fast CRC32 supported: Supported on x86
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: DMutex implementation: pthread_mutex_t
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm05/store.db/MANIFEST-000005
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.merge_operator:
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_filter: None
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_filter_factory: None
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.sst_partitioner_factory: None
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.memtable_factory: SkipListFactory
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.table_factory: BlockBasedTable
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5608a7b2d980)
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: cache_index_and_filter_blocks: 1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: cache_index_and_filter_blocks_with_high_priority: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: pin_l0_filter_and_index_blocks_in_cache: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: pin_top_level_index_and_filter: 1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: index_type: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: data_block_index_type: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: index_shortening: 1
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: data_block_hash_table_util_ratio: 0.750000
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: checksum: 4
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: no_block_cache: 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: block_cache: 0x5608a7b23a30
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: block_cache_name: BinnedLRUCache
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: block_cache_options:
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: capacity : 536870912
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: num_shard_bits : 4
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: strict_capacity_limit : 0
2026-04-15T14:12:54.938 INFO:journalctl@ceph.mon.vm05.vm05.stdout: high_pri_pool_ratio: 0.000
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: block_cache_compressed: (nil)
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: persistent_cache: (nil)
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: block_size: 4096
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: block_size_deviation: 10
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: block_restart_interval: 16
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: index_block_restart_interval: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: metadata_block_size: 4096
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: partition_filters: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: use_delta_encoding: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: filter_policy: bloomfilter
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: whole_key_filtering: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: verify_compression: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: read_amp_bytes_per_bit: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: format_version: 5
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: enable_index_compression: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: block_align: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: max_auto_readahead_size: 262144
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: prepopulate_block_cache: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: initial_auto_readahead_size: 8192
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout: num_file_reads_for_auto_readahead: 2
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.write_buffer_size: 33554432
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_write_buffer_number: 2
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compression: NoCompression
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bottommost_compression: Disabled
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.prefix_extractor: nullptr
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.num_levels: 7
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bottommost_compression_opts.level: 32767
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bottommost_compression_opts.enabled: false
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compression_opts.window_bits: -14
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compression_opts.level: 32767
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compression_opts.strategy: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compression_opts.parallel_threads: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compression_opts.enabled: false
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.level0_stop_writes_trigger: 36
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.target_file_size_base: 67108864
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.target_file_size_multiplier: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_bytes_for_level_base: 268435456
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-04-15T14:12:54.939 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_compaction_bytes: 1677721600
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.arena_block_size: 1048576
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.disable_auto_compactions: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.inplace_update_support: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.inplace_update_num_locks: 10000
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.memtable_whole_key_filtering: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.memtable_huge_page_size: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.bloom_locality: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.max_successive_merges: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.optimize_filters_for_hits: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.paranoid_file_checks: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.force_consistency_checks: 1
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.report_bg_io_stats: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.ttl: 2592000
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.periodic_compaction_seconds: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.preclude_last_level_data_seconds: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.preserve_internal_time_seconds: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.enable_blob_files: false
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.min_blob_size: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.blob_file_size: 268435456
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.blob_compression_type: NoCompression
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.enable_blob_garbage_collection: false
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.blob_compaction_readahead_size: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.blob_file_starting_level: 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm05/store.db/MANIFEST-000005 succeeded,manifest_file_number is 5, next_file_number is 7, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 8c4f909a-660d-41dc-a01b-ca03be8d10bb
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776262374603956, "job": 1, "event": "recovery_started", "wal_files": [4]}
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #4 mode 2
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776262374604581, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1643, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 1, "largest_seqno": 5, "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1776262374, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "8c4f909a-660d-41dc-a01b-ca03be8d10bb", "db_session_id": "SR3NS5Z17L4FI8JKWDO3", "orig_file_number": 8, "seqno_to_time_mapping": "N/A"}}
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776262374604648, "job": 1, "event": "recovery_finished"}
2026-04-15T14:12:54.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [db/version_set.cc:5047] Creating manifest 10
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm05/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5608a7b4ee00
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: DB pointer 0x5608a7b9e000
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: rocksdb: [db/db_impl/db_impl.cc:1111]
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: ** DB Stats **
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: ** Compaction Stats [default] **
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: L0 1/0 1.60 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.5 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Sum 1/0 1.60 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.5 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.5 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: ** Compaction Stats [default] **
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.5 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
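The RocksDB EVENT_LOG_v1 entries above (recovery_started, table_file_creation, recovery_finished) carry machine-parseable JSON payloads. One rough way to pull them out of a saved copy of this log for inspection (a sketch; teuthology.log stands in for wherever the archive file was downloaded):

    grep -o 'EVENT_LOG_v1 {.*}' teuthology.log | sed 's/^EVENT_LOG_v1 //' \
      | python3 -c 'import json,sys; [print(e["time_micros"], e["event"]) for e in map(json.loads, sys.stdin)]'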
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: AddFile(Keys): cumulative 0, interval 0
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Cumulative compaction: 0.00 GB write, 0.37 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Interval compaction: 0.00 GB write, 0.37 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Block cache BinnedLRUCache@0x5608a7b23a30#2 capacity: 512.00 MB usage: 0.22 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1e-05 secs_since: 0
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.11 KB,2.08616e-05%) Misc(1,0.00 KB,0%)
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: ** File Read Latency Histogram By Level [default] **
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05 does not exist in monmap, will attempt to join an existing cluster
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: using public_addr v2:192.168.123.105:0/0 -> [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0]
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: starting mon.vm05 rank -1 at public addrs [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] at bind addrs [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon_data /var/lib/ceph/mon/ceph-vm05 fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(???) e0 preinit fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr module enable", "module": "prometheus"} : dispatch
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/1714227711' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14170 192.168.123.104:0/1713471667' entity='mgr.vm04.ycniad' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).mds e1 new map
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mgrmap e14: vm04.ycniad(active, since 30s)
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).mds e1 print_map
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: e1
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: btime 2026-04-15T14:11:13:006993+0000
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: enable_multiple, ever_enabled_multiple: 1,1
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes}
2026-04-15T14:12:54.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout: legacy client fscid: -1
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout: No filesystems configured
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e5 e5: 0 total, 0 up, 0 in
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e5 crush map has features 3314932999778484224, adjusting msgr requires
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/4278484000' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/3971583649' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/4106566592' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/207370587' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/617229888' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/1694948806' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Active manager daemon vm04.ycniad restarted
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Activating manager daemon vm04.ycniad
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: osdmap e5: 0 total, 0 up, 0 in
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mgrmap e15: vm04.ycniad(active, starting, since 0.00555217s)
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr metadata", "who": "vm04.ycniad", "id": "vm04.ycniad"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mds metadata"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Manager daemon vm04.ycniad is now available
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm04.ycniad/mirror_snapshot_schedule"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm04.ycniad/trash_purge_schedule"} : dispatch
2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.?
192.168.123.105:0/2303848615' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mgrmap e16: vm04.ycniad(active, since 1.0103s) 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: [15/Apr/2026:14:12:45] ENGINE Bus STARTING 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: [15/Apr/2026:14:12:45] ENGINE Serving on http://192.168.123.104:8765 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: [15/Apr/2026:14:12:45] ENGINE Serving on https://192.168.123.104:7150 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: [15/Apr/2026:14:12:45] ENGINE Bus STARTED 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: [15/Apr/2026:14:12:45] ENGINE Client ('192.168.123.104', 53276) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 
192.168.123.105:0/3250949836' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mgrmap e17: vm04.ycniad(active, since 2s) 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"} : dispatch 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Updating vm04:/etc/ceph/ceph.conf 2026-04-15T14:12:54.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Updating vm05:/etc/ceph/ceph.conf 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Updating vm05:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.conf 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Updating vm04:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.conf 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Updating vm05:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.client.admin.keyring 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 
ceph-mon[57841]: Updating vm04:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.client.admin.keyring 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm05", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm05", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 
192.168.123.105:0/3022857473' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Deploying daemon ceph-exporter.vm05 on vm05 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm05", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm05", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Deploying daemon crash.vm05 on vm05 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/3862752732' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Deploying daemon node-exporter.vm05 on vm05 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 
192.168.123.105:0/1046433175' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm05.ozgwuj", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm05.ozgwuj", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr services"} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: Deploying daemon mgr.vm05.ozgwuj on vm05 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='client.? 
192.168.123.105:0/1563662373' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:12:54.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:54 vm05 ceph-mon[57841]: mon.vm05@-1(synchronizing).paxosservice(auth 1..8) refresh upgraded, format 0 -> 3 2026-04-15T14:12:55.753 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-15T14:12:55.753 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph mon dump -f json 2026-04-15T14:12:55.895 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm05/config 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: mon.vm04 calling monitor election 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: mon.vm05 calling monitor election 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: 
from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: mon.vm04 is new leader, mons vm04,vm05 in quorum (ranks 0,1) 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: monmap epoch 2 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: last_changed 2026-04-15T14:12:54.724655+0000 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: created 2026-04-15T14:11:11.742276+0000 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: min_mon_release 20 (tentacle) 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: election_strategy: 1 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: 0: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.vm04 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: 1: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.vm05 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: fsmap 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: osdmap e5: 0 total, 0 up, 0 in 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: mgrmap e17: vm04.ycniad(active, since 15s) 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: overall HEALTH_OK 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:12:59 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 
192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: mon.vm04 calling monitor election 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: mon.vm05 calling monitor election 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: mon.vm04 is new leader, mons vm04,vm05 in quorum (ranks 0,1) 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: monmap epoch 2 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: last_changed 2026-04-15T14:12:54.724655+0000 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: created 2026-04-15T14:11:11.742276+0000 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: min_mon_release 20 (tentacle) 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: election_strategy: 1 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: 0: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.vm04 2026-04-15T14:13:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: 1: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.vm05 2026-04-15T14:13:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: fsmap 2026-04-15T14:13:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: osdmap 
e5: 0 total, 0 up, 0 in 2026-04-15T14:13:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: mgrmap e17: vm04.ycniad(active, since 15s) 2026-04-15T14:13:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: overall HEALTH_OK 2026-04-15T14:13:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:12:59 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:13:00.755 INFO:teuthology.orchestra.run.vm05.stdout: 2026-04-15T14:13:00.755 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":2,"fsid":"d89dc7c6-38d4-11f1-aa58-cd98464f39ae","modified":"2026-04-15T14:12:54.724655Z","created":"2026-04-15T14:11:11.742276Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm04","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3300","nonce":0},{"type":"v1","addr":"192.168.123.104:6789","nonce":0}]},"addr":"192.168.123.104:6789/0","public_addr":"192.168.123.104:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm05","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:3300","nonce":0},{"type":"v1","addr":"192.168.123.105:6789","nonce":0}]},"addr":"192.168.123.105:6789/0","public_addr":"192.168.123.105:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-04-15T14:13:00.755 INFO:teuthology.orchestra.run.vm05.stderr:dumped monmap epoch 2 2026-04-15T14:13:00.815 INFO:tasks.cephadm:Generating final ceph.conf file... 
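The monmap JSON dumped above is what the cephadm task's "Waiting for 2 mons in monmap" poll inspects: it reruns `ceph mon dump -f json` through the cephadm shell until the `mons` array reaches the expected size. A minimal stand-alone sketch of that check, reusing the FSID and image from this run but assuming `jq` is available on the host (it is not used in this run), could look like:

    # Poll the monmap until it contains the expected number of mons.
    # FSID and IMAGE are taken from this run; jq is an assumption.
    FSID=d89dc7c6-38d4-11f1-aa58-cd98464f39ae
    IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5
    want=2
    until n="$(sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" shell \
          -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
          --fsid "$FSID" -- ceph mon dump -f json 2>/dev/null \
          | jq '.mons | length')" && [ "${n:-0}" -ge "$want" ]; do
      sleep 5
    done

Once both mons report in ("quorum":[0,1] in the dump above), the task moves on to generating the final ceph.conf.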
2026-04-15T14:13:00.815 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph config generate-minimal-conf 2026-04-15T14:13:00.964 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: Updating vm04:/etc/ceph/ceph.conf 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: Updating vm05:/etc/ceph/ceph.conf 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: Updating vm05:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.conf 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: Updating vm04:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.conf 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: Reconfiguring mon.vm04 (unknown last config time)... 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: Reconfiguring daemon mon.vm04 on vm04 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: Reconfiguring mgr.vm04.ycniad (unknown last config time)... 
2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm04.ycniad", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr services"} : dispatch 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: Reconfiguring daemon mgr.vm04.ycniad on vm04 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: Reconfiguring ceph-exporter.vm04 (monmap changed)... 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm04", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: Reconfiguring daemon ceph-exporter.vm04 on vm04 2026-04-15T14:13:01.335 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:01 vm04 ceph-mon[53345]: from='client.? 
192.168.123.105:0/110128212' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T14:13:01.386 INFO:teuthology.orchestra.run.vm04.stdout:# minimal ceph.conf for d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:13:01.387 INFO:teuthology.orchestra.run.vm04.stdout:[global] 2026-04-15T14:13:01.387 INFO:teuthology.orchestra.run.vm04.stdout: fsid = d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:13:01.387 INFO:teuthology.orchestra.run.vm04.stdout: mon_host = [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] 2026-04-15T14:13:01.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: Updating vm04:/etc/ceph/ceph.conf 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: Updating vm05:/etc/ceph/ceph.conf 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: Updating vm05:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.conf 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: Updating vm04:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/config/ceph.conf 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: Reconfiguring mon.vm04 (unknown last config time)... 
2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: Reconfiguring daemon mon.vm04 on vm04 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: Reconfiguring mgr.vm04.ycniad (unknown last config time)... 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm04.ycniad", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr services"} : dispatch 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: Reconfiguring daemon mgr.vm04.ycniad on vm04 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: Reconfiguring ceph-exporter.vm04 (monmap changed)... 
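Each "Reconfiguring daemon ... on vmNN" line above is cephadm regenerating that daemon's on-disk configuration after the monmap change; the surrounding mon entries show the `auth get`/`config generate-minimal-conf` calls it issues along the way. The same regeneration can be requested by hand through the orchestrator CLI (a sketch, using daemon names from this run):

    # Ask cephadm to regenerate a daemon's config files in place
    # (reconfig does not redeploy the container).
    ceph orch daemon reconfig mon.vm04
    ceph orch daemon reconfig mgr.vm04.ycniad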
2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm04", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: Reconfiguring daemon ceph-exporter.vm04 on vm04 2026-04-15T14:13:01.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:01 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/110128212' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T14:13:01.489 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-04-15T14:13:01.490 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-15T14:13:01.490 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.conf 2026-04-15T14:13:01.516 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-15T14:13:01.516 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-04-15T14:13:01.583 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-04-15T14:13:01.583 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/ceph/ceph.conf 2026-04-15T14:13:01.609 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-04-15T14:13:01.616 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-04-15T14:13:01.676 DEBUG:tasks.cephadm:set 0 configs 2026-04-15T14:13:01.676 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph config dump 2026-04-15T14:13:01.822 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:02.491 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:02.491 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:02.491 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: Reconfiguring crash.vm04 (monmap changed)... 
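The "Distributing (final) config and client.admin keyring" step above pushes both files to every node by piping their contents into `sudo dd of=...`, exactly as the run lines show, and then verifies the result with `ceph config dump`. Done by hand, the push amounts to roughly the following (a sketch, not the task's exact code):

    # The file body arrives on stdin and dd writes it in place on each node.
    ceph config generate-minimal-conf | sudo dd of=/etc/ceph/ceph.conf
    sudo ceph auth get client.admin | sudo dd of=/etc/ceph/ceph.client.admin.keyring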
2026-04-15T14:13:02.491 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm04", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-15T14:13:02.491 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:02.491 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: Reconfiguring daemon crash.vm04 on vm04 2026-04-15T14:13:02.491 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/975803131' entity='client.admin' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:02.492 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:02.492 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:02.492 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: Reconfiguring alertmanager.vm04 deps ['mgr.vm04.ycniad', 'secure_monitoring_stack:False'] -> ['alertmanager.vm04', 'mgr.vm04.ycniad', 'mgr.vm05.ozgwuj', 'secure_monitoring_stack:False'] (diff {'alertmanager.vm04', 'mgr.vm05.ozgwuj'}) 2026-04-15T14:13:02.492 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:02 vm04 ceph-mon[53345]: Reconfiguring daemon alertmanager.vm04 on vm04 2026-04-15T14:13:02.678 INFO:teuthology.orchestra.run.vm04.stdout:WHO MASK LEVEL OPTION VALUE RO 2026-04-15T14:13:02.678 INFO:teuthology.orchestra.run.vm04.stdout:global dev auth_debug true 2026-04-15T14:13:02.678 INFO:teuthology.orchestra.run.vm04.stdout:global basic container_image harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9 * 2026-04-15T14:13:02.678 INFO:teuthology.orchestra.run.vm04.stdout:global dev debug_asserts_on_shutdown true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global basic log_to_file true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global basic log_to_journald false 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global basic log_to_stderr false 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced mon_allow_pool_delete true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced mon_clock_drift_allowed 1.000000 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced mon_cluster_log_to_file true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced mon_max_pg_per_osd 10000 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced mon_pg_warn_max_object_skew 0.000000 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced mon_warn_on_crush_straw_calc_version_zero false 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced mon_warn_on_legacy_crush_tunables false 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced mon_warn_on_osd_down_out_interval_zero false 2026-04-15T14:13:02.679 
INFO:teuthology.orchestra.run.vm04.stdout:global dev mon_warn_on_pool_pg_num_not_power_of_two false 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced mon_warn_on_too_few_osds false 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global dev ms_die_on_bug true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global dev ms_die_on_old_message true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced osd_pool_default_erasure_code_profile plugin=isa technique=reed_sol_van k=2 m=1 crush-failure-domain=osd 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced osd_pool_default_pg_autoscale_mode off 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:global advanced public_network 192.168.123.0/24 * 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced auth_allow_insecure_global_id_reclaim false 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced auth_mon_ticket_ttl 660.000000 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced auth_service_ticket_ttl 240.000000 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced debug_mon 20/20 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced debug_ms 1/1 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced debug_paxos 20/20 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced mon_data_avail_warn 5 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced mon_mgr_mkfs_grace 240 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon dev mon_osd_prime_pg_temp true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced mon_osd_reporter_subtree_level osd 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced mon_reweight_min_bytes_per_osd 10 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced mon_reweight_min_pgs_per_osd 4 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced mon_warn_on_insecure_global_id_reclaim false 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mon advanced mon_warn_on_insecure_global_id_reclaim_allowed false 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mgr advanced debug_mgr 20/20 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mgr advanced debug_ms 1/1 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mgr advanced mgr/cephadm/allow_ptrace true * 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mgr advanced mgr/cephadm/container_init True * 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mgr advanced mgr/cephadm/migration_current 7 * 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mgr advanced mgr/dashboard/GRAFANA_API_SSL_VERIFY false * 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mgr advanced mgr/dashboard/ssl_server_port 8443 * 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mgr advanced mgr/orchestrator/orchestrator cephadm 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mgr advanced mon_reweight_min_bytes_per_osd 10 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:mgr advanced mon_reweight_min_pgs_per_osd 4 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:osd dev 
bdev_debug_aio true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced debug_ms 1/1 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced debug_osd 20/20 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:osd dev osd_debug_misdirected_ops true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:osd dev osd_debug_op_order true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:osd dev osd_debug_pg_log_writeout true 2026-04-15T14:13:02.679 INFO:teuthology.orchestra.run.vm04.stdout:osd dev osd_debug_shutdown true 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd dev osd_debug_verify_cached_snaps true 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd dev osd_debug_verify_missing_on_start true 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd dev osd_debug_verify_stray_on_activate true 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced osd_deep_scrub_update_digest_min_age 30 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd basic osd_mclock_iops_capacity_threshold_hdd 49000.000000 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced osd_mclock_profile high_recovery_ops 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced osd_memory_target_autotune true 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced osd_op_queue debug_random * 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced osd_op_queue_cut_off debug_random * 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced osd_recover_clone_overlap true 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced osd_recovery_max_chunk 1048576 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced osd_scrub_load_threshold 5.000000 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced osd_scrub_max_interval 600.000000 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:osd advanced osd_shutdown_pgref_assert true 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:client.rgw advanced rgw_cache_enabled true 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:client.rgw advanced rgw_enable_ops_log true 2026-04-15T14:13:02.680 INFO:teuthology.orchestra.run.vm04.stdout:client.rgw advanced rgw_enable_usage_log true 2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: Reconfiguring crash.vm04 (monmap changed)... 
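The `ceph config dump` table above confirms the job's overrides landed in the mon config database, for example the mClock HDD IOPS threshold and the mon/osd debug levels. Individual values can be spot-checked without paging the whole dump:

    # Read single options back; expected values match the dump above.
    ceph config get osd osd_mclock_iops_capacity_threshold_hdd   # 49000.000000
    ceph config get mon mon_data_avail_warn                      # 5
    ceph config get client.rgw rgw_enable_usage_log              # true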
2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm04", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: Reconfiguring daemon crash.vm04 on vm04 2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/975803131' entity='client.admin' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: Reconfiguring alertmanager.vm04 deps ['mgr.vm04.ycniad', 'secure_monitoring_stack:False'] -> ['alertmanager.vm04', 'mgr.vm04.ycniad', 'mgr.vm05.ozgwuj', 'secure_monitoring_stack:False'] (diff {'alertmanager.vm04', 'mgr.vm05.ozgwuj'}) 2026-04-15T14:13:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:02 vm05 ceph-mon[57841]: Reconfiguring daemon alertmanager.vm04 on vm04 2026-04-15T14:13:02.747 INFO:tasks.cephadm:Deploying OSDs... 
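Before any OSD is created, each scratch device listed in /scratch_devs is probed three ways in the lines that follow: stat must show a block special file, a one-sector dd read must succeed, and the device must not appear in the mount table. Condensed, the probe performed for all four NVMe devices is:

    # One pass of the per-device readiness probe (mirrors the commands below).
    for dev in /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1; do
      stat "$dev"                                  # exists as a block special file
      sudo dd if="$dev" of=/dev/null count=1       # readable: one 512-byte sector
      ! mount | grep -v devtmpfs | grep -q "$dev"  # not mounted anywhere
    done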
2026-04-15T14:13:02.747 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-15T14:13:02.747 DEBUG:teuthology.orchestra.run.vm04:> dd if=/scratch_devs of=/dev/stdout 2026-04-15T14:13:02.772 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-15T14:13:02.772 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/nvme0n1 2026-04-15T14:13:02.831 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/nvme0n1 2026-04-15T14:13:02.831 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-15T14:13:02.832 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 987 Links: 1 Device type: 103,0 2026-04-15T14:13:02.832 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T14:13:02.832 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-15T14:13:02.832 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-15 14:12:09.424447815 +0000 2026-04-15T14:13:02.832 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-15 14:10:19.496640756 +0000 2026-04-15T14:13:02.832 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-15 14:10:19.496640756 +0000 2026-04-15T14:13:02.832 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-04-15 14:10:18.613639872 +0000 2026-04-15T14:13:02.832 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1 2026-04-15T14:13:02.904 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-04-15T14:13:02.904 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-04-15T14:13:02.904 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000234173 s, 2.2 MB/s 2026-04-15T14:13:02.905 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/nvme0n1 2026-04-15T14:13:02.960 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/nvme1n1 2026-04-15T14:13:03.016 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/nvme1n1 2026-04-15T14:13:03.016 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-15T14:13:03.016 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 999 Links: 1 Device type: 103,3 2026-04-15T14:13:03.016 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T14:13:03.016 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-15T14:13:03.016 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-15 14:12:09.458447863 +0000 2026-04-15T14:13:03.016 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-15 14:10:19.927641187 +0000 2026-04-15T14:13:03.016 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-15 14:10:19.927641187 +0000 2026-04-15T14:13:03.016 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-04-15 14:10:18.750640009 +0000 2026-04-15T14:13:03.017 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1 2026-04-15T14:13:03.081 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-04-15T14:13:03.081 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-04-15T14:13:03.081 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000131117 s, 3.9 MB/s 2026-04-15T14:13:03.082 DEBUG:teuthology.orchestra.run.vm04:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme1n1 2026-04-15T14:13:03.141 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/nvme2n1 2026-04-15T14:13:03.208 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/nvme2n1 2026-04-15T14:13:03.208 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-15T14:13:03.208 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 1011 Links: 1 Device type: 103,5 2026-04-15T14:13:03.208 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T14:13:03.208 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-15T14:13:03.208 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-15 14:12:09.485447900 +0000 2026-04-15T14:13:03.208 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-15 14:10:20.396889812 +0000 2026-04-15T14:13:03.208 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-15 14:10:20.396889812 +0000 2026-04-15T14:13:03.208 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-04-15 14:10:18.895640154 +0000 2026-04-15T14:13:03.209 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1 2026-04-15T14:13:03.275 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-04-15T14:13:03.275 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-04-15T14:13:03.275 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000228252 s, 2.2 MB/s 2026-04-15T14:13:03.277 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/nvme2n1 2026-04-15T14:13:03.336 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/nvme3n1 2026-04-15T14:13:03.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:03 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/3723993799' entity='client.admin' cmd={"prefix": "config dump"} : dispatch 2026-04-15T14:13:03.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:03 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:03.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:03 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:03.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:03 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch 2026-04-15T14:13:03.387 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/nvme3n1 2026-04-15T14:13:03.387 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-15T14:13:03.387 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 1024 Links: 1 Device type: 103,7 2026-04-15T14:13:03.387 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T14:13:03.387 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-15T14:13:03.387 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-15 14:12:09.514447940 +0000 2026-04-15T14:13:03.387 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-15 14:10:20.834140130 +0000 2026-04-15T14:13:03.387 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-15 14:10:20.834140130 +0000 2026-04-15T14:13:03.387 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-04-15 14:10:19.043640302 +0000 2026-04-15T14:13:03.388 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1 2026-04-15T14:13:03.464 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-04-15T14:13:03.464 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-04-15T14:13:03.464 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000496169 s, 1.0 MB/s 2026-04-15T14:13:03.467 DEBUG:teuthology.orchestra.run.vm04:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme3n1 2026-04-15T14:13:03.524 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-04-15T14:13:03.525 DEBUG:teuthology.orchestra.run.vm05:> dd if=/scratch_devs of=/dev/stdout 2026-04-15T14:13:03.541 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-15T14:13:03.541 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/nvme0n1 2026-04-15T14:13:03.598 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/nvme0n1 2026-04-15T14:13:03.598 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-15T14:13:03.598 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 999 Links: 1 Device type: 103,1 2026-04-15T14:13:03.598 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T14:13:03.598 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-15T14:13:03.598 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-04-15 14:12:46.936780076 +0000 2026-04-15T14:13:03.598 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-04-15 14:10:22.912338816 +0000 2026-04-15T14:13:03.598 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-04-15 14:10:22.912338816 +0000 2026-04-15T14:13:03.598 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-04-15 14:10:21.992242510 +0000 2026-04-15T14:13:03.598 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1 2026-04-15T14:13:03.661 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:03 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3723993799' entity='client.admin' cmd={"prefix": "config dump"} : dispatch 2026-04-15T14:13:03.661 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:03 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:03.661 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:03 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:03.661 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:03 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch 2026-04-15T14:13:03.663 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-04-15T14:13:03.664 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-04-15T14:13:03.664 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000181566 s, 2.8 MB/s 2026-04-15T14:13:03.665 DEBUG:teuthology.orchestra.run.vm05:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme0n1 2026-04-15T14:13:03.724 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/nvme1n1 2026-04-15T14:13:03.782 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/nvme1n1 2026-04-15T14:13:03.782 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-15T14:13:03.782 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 1011 Links: 1 Device type: 103,2 2026-04-15T14:13:03.782 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T14:13:03.782 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-15T14:13:03.782 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-04-15 14:12:46.969781167 +0000 2026-04-15T14:13:03.782 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-04-15 14:10:23.342385470 +0000 2026-04-15T14:13:03.782 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-04-15 14:10:23.342385470 +0000 2026-04-15T14:13:03.782 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-04-15 14:10:22.157256900 +0000 2026-04-15T14:13:03.782 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1 2026-04-15T14:13:03.847 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-04-15T14:13:03.847 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-04-15T14:13:03.847 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000167509 s, 3.1 MB/s 2026-04-15T14:13:03.848 DEBUG:teuthology.orchestra.run.vm05:> ! mount | grep -v devtmpfs | grep -q /dev/nvme1n1 2026-04-15T14:13:03.908 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/nvme2n1 2026-04-15T14:13:03.969 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/nvme2n1 2026-04-15T14:13:03.969 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-15T14:13:03.970 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 1023 Links: 1 Device type: 103,4 2026-04-15T14:13:03.970 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T14:13:03.970 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-15T14:13:03.970 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-04-15 14:12:47.011782556 +0000 2026-04-15T14:13:03.970 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-04-15 14:10:23.765431365 +0000 2026-04-15T14:13:03.970 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-04-15 14:10:23.765431365 +0000 2026-04-15T14:13:03.970 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-04-15 14:10:22.283270571 +0000 2026-04-15T14:13:03.970 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1 2026-04-15T14:13:04.035 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-04-15T14:13:04.035 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-04-15T14:13:04.035 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000197826 s, 2.6 MB/s 2026-04-15T14:13:04.036 DEBUG:teuthology.orchestra.run.vm05:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme2n1 2026-04-15T14:13:04.094 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/nvme3n1 2026-04-15T14:13:04.156 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/nvme3n1 2026-04-15T14:13:04.156 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-15T14:13:04.156 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 1035 Links: 1 Device type: 103,6 2026-04-15T14:13:04.156 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T14:13:04.156 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-15T14:13:04.156 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-04-15 14:12:47.042783581 +0000 2026-04-15T14:13:04.156 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-04-15 14:10:24.184476826 +0000 2026-04-15T14:13:04.156 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-04-15 14:10:24.184476826 +0000 2026-04-15T14:13:04.156 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-04-15 14:10:22.402283482 +0000 2026-04-15T14:13:04.156 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1 2026-04-15T14:13:04.223 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-04-15T14:13:04.223 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-04-15T14:13:04.223 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000233725 s, 2.2 MB/s 2026-04-15T14:13:04.224 DEBUG:teuthology.orchestra.run.vm05:> ! mount | grep -v devtmpfs | grep -q /dev/nvme3n1 2026-04-15T14:13:04.283 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch apply osd --all-available-devices 2026-04-15T14:13:04.350 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:04 vm05 ceph-mon[57841]: Reconfiguring grafana.vm04 deps ['secure_monitoring_stack:False'] -> ['prometheus.vm04', 'secure_monitoring_stack:False'] (diff {'prometheus.vm04'}) 2026-04-15T14:13:04.350 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:04 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-04-15T14:13:04.350 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:04 vm05 ceph-mon[57841]: Reconfiguring daemon grafana.vm04 on vm04 2026-04-15T14:13:04.350 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:04 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:04.350 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:04 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:04.350 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:04 vm05 ceph-mon[57841]: Standby manager daemon vm05.ozgwuj started 2026-04-15T14:13:04.350 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:04 vm05 ceph-mon[57841]: from='mgr.? 192.168.123.105:0/525043709' entity='mgr.vm05.ozgwuj' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm05.ozgwuj/crt"} : dispatch 2026-04-15T14:13:04.350 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:04 vm05 ceph-mon[57841]: from='mgr.? 
192.168.123.105:0/525043709' entity='mgr.vm05.ozgwuj' cmd={"prefix": "config-key get", "key": "mgr/dashboard/crt"} : dispatch 2026-04-15T14:13:04.350 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:04 vm05 ceph-mon[57841]: from='mgr.? 192.168.123.105:0/525043709' entity='mgr.vm05.ozgwuj' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm05.ozgwuj/key"} : dispatch 2026-04-15T14:13:04.350 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:04 vm05 ceph-mon[57841]: from='mgr.? 192.168.123.105:0/525043709' entity='mgr.vm05.ozgwuj' cmd={"prefix": "config-key get", "key": "mgr/dashboard/key"} : dispatch 2026-04-15T14:13:04.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:04 vm04 ceph-mon[53345]: Reconfiguring grafana.vm04 deps ['secure_monitoring_stack:False'] -> ['prometheus.vm04', 'secure_monitoring_stack:False'] (diff {'prometheus.vm04'}) 2026-04-15T14:13:04.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:04 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-04-15T14:13:04.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:04 vm04 ceph-mon[53345]: Reconfiguring daemon grafana.vm04 on vm04 2026-04-15T14:13:04.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:04 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:04.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:04 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:04.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:04 vm04 ceph-mon[53345]: Standby manager daemon vm05.ozgwuj started 2026-04-15T14:13:04.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:04 vm04 ceph-mon[53345]: from='mgr.? 192.168.123.105:0/525043709' entity='mgr.vm05.ozgwuj' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm05.ozgwuj/crt"} : dispatch 2026-04-15T14:13:04.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:04 vm04 ceph-mon[53345]: from='mgr.? 192.168.123.105:0/525043709' entity='mgr.vm05.ozgwuj' cmd={"prefix": "config-key get", "key": "mgr/dashboard/crt"} : dispatch 2026-04-15T14:13:04.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:04 vm04 ceph-mon[53345]: from='mgr.? 192.168.123.105:0/525043709' entity='mgr.vm05.ozgwuj' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm05.ozgwuj/key"} : dispatch 2026-04-15T14:13:04.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:04 vm04 ceph-mon[53345]: from='mgr.? 192.168.123.105:0/525043709' entity='mgr.vm05.ozgwuj' cmd={"prefix": "config-key get", "key": "mgr/dashboard/key"} : dispatch 2026-04-15T14:13:04.465 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm05/config 2026-04-15T14:13:04.879 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled osd.all-available-devices update... 2026-04-15T14:13:04.929 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 
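
With the osd.all-available-devices update scheduled, the task now waits for all 8 OSDs (four NVMe loop devices on each of the two hosts) to report in; the repeated "ceph osd stat -f json" invocations below are that poll. A sketch of the same wait loop, assuming jq is available on the host, with IMAGE and FSID standing in for the container image and fsid shown in the logged commands:

    # Poll the osdmap until all 8 OSDs are up, mirroring the repeated
    # `ceph osd stat -f json` calls below. IMAGE and FSID come from the
    # cephadm shell invocations in this log; jq is assumed installed.
    until [ "$(sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" shell \
            -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
            --fsid "$FSID" -- ceph osd stat -f json | jq .num_up_osds)" -eq 8 ]; do
        sleep 2
    done
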
2026-04-15T14:13:04.929 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:05.091 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:05.515 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:05.574 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-15T14:13:05.801 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: Reconfiguring prometheus.vm04 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm04', 'mgr.vm04.ycniad', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm04', 'ceph-exporter.vm05', 'mgr.vm04.ycniad', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ceph-exporter.vm05'}) 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: Reconfiguring daemon prometheus.vm04 on vm04 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm05", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: mgrmap e18: vm04.ycniad(active, since 20s), standbys: vm05.ozgwuj 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr metadata", "who": "vm05.ozgwuj", "id": "vm05.ozgwuj"} : dispatch 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": 
"auth get-or-create", "entity": "client.crash.vm05", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:05.802 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:05 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/925681451' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:05.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: Reconfiguring prometheus.vm04 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm04', 'mgr.vm04.ycniad', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm04', 'ceph-exporter.vm05', 'mgr.vm04.ycniad', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ceph-exporter.vm05'}) 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: Reconfiguring daemon prometheus.vm04 on vm04 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm05", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: mgrmap e18: vm04.ycniad(active, since 20s), standbys: vm05.ozgwuj 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr metadata", "who": "vm05.ozgwuj", "id": "vm05.ozgwuj"} : dispatch 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm05", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 
2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:05 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/925681451' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:06.574 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:06.715 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='client.24099 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: Marking host: vm04 for OSDSpec preview refresh. 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: Marking host: vm05 for OSDSpec preview refresh. 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: Saving service osd.all-available-devices spec with placement * 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: Reconfiguring ceph-exporter.vm05 (monmap changed)... 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: Reconfiguring daemon ceph-exporter.vm05 on vm05 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: Reconfiguring crash.vm05 (monmap changed)... 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: Reconfiguring daemon crash.vm05 on vm05 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: Reconfiguring mgr.vm05.ozgwuj (monmap changed)... 
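
The "orch apply osd" dispatch above ends with the orchestrator saving a service named osd.all-available-devices with placement *. The --all-available-devices flag is shorthand for an OSD drive-group service spec; a sketch of the equivalent spec applied by hand (only the service name and placement come from the log entries; the YAML shape follows upstream cephadm examples and is an assumption):

    # Spec equivalent to `ceph orch apply osd --all-available-devices`.
    # Writes a drive-group spec and applies it; shape per upstream examples.
    cat > /tmp/all-available-devices.yaml <<'EOF'
    service_type: osd
    service_id: all-available-devices
    placement:
      host_pattern: '*'
    spec:
      data_devices:
        all: true
    EOF
    ceph orch apply -i /tmp/all-available-devices.yaml
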
2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm05.ozgwuj", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr services"} : dispatch 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: Reconfiguring daemon mgr.vm05.ozgwuj on vm05 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T14:13:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard get-alertmanager-api-host"} : dispatch 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm04.local:9093"} : dispatch 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 
2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard get-grafana-api-url"} : dispatch 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard set-grafana-api-url", "value": "https://vm04.local:3000"} : dispatch 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard set-prometheus-api-host", "value": "http://vm04.local:9095"} : dispatch 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:06 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='client.24099 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: Marking host: vm04 for OSDSpec preview refresh. 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: Marking host: vm05 for OSDSpec preview refresh. 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: Saving service osd.all-available-devices spec with placement * 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: Reconfiguring ceph-exporter.vm05 (monmap changed)... 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: Reconfiguring daemon ceph-exporter.vm05 on vm05 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: Reconfiguring crash.vm05 (monmap changed)... 
2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: Reconfiguring daemon crash.vm05 on vm05 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: Reconfiguring mgr.vm05.ozgwuj (monmap changed)... 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm05.ozgwuj", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mgr services"} : dispatch 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: Reconfiguring daemon mgr.vm05.ozgwuj on vm05 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard 
get-alertmanager-api-host"} : dispatch 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm04.local:9093"} : dispatch 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard get-grafana-api-url"} : dispatch 2026-04-15T14:13:06.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard set-grafana-api-url", "value": "https://vm04.local:3000"} : dispatch 2026-04-15T14:13:06.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch 2026-04-15T14:13:06.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard set-prometheus-api-host", "value": "http://vm04.local:9095"} : dispatch 2026-04-15T14:13:06.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:06.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:06 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:13:07.104 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:07.185 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: Reconfiguring mon.vm05 (monmap changed)... 
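
While the OSD wait loop runs, the active mgr keeps wiring the dashboard to the freshly redeployed monitoring daemons; the dashboard set-* dispatches above and below carry the actual endpoints. Collected here as their manual equivalents (commands and values copied verbatim from the dispatched mon entries; cephadm issues them automatically):

    # Dashboard wiring performed automatically by cephadm in this run:
    ceph dashboard set-alertmanager-api-host http://vm04.local:9093
    ceph dashboard set-grafana-api-url https://vm04.local:3000
    ceph dashboard set-prometheus-api-host http://vm04.local:9095
    ceph dashboard set-grafana-api-ssl-verify false
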
2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: Reconfiguring daemon mon.vm05 on vm05 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm04.local:9093"}]: dispatch 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm04.local:3000"}]: dispatch 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm04.local:9095"}]: dispatch 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: Certificate for "grafana_cert (vm04)" is still valid for 1094 days. 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1549931029' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:07.636 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:07 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: Reconfiguring mon.vm05 (monmap changed)... 
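
A few entries further down, ceph-volume starts registering the OSDs themselves: each "osd new" call, issued under the client.bootstrap-osd identity, binds a fresh uuid to the next free OSD id, and the osdmap epoch advances with every registration (e6: 1 total, then e7: 2 total, and so on). The manual equivalent of one registration looks roughly like this (the keyring path and the use of uuidgen are assumptions; only the osd new form and the bootstrap-osd identity come from the log):

    # Register one new OSD the way the ceph-volume calls below do.
    # Keyring path is the conventional bootstrap-osd location (assumption).
    uuid=$(uuidgen)                    # illustrative uuid; ceph-volume supplies its own
    osd_id=$(ceph -n client.bootstrap-osd \
        -k /var/lib/ceph/bootstrap-osd/ceph.keyring \
        osd new "$uuid")               # prints the allocated OSD id
    echo "allocated osd.$osd_id for $uuid"
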
2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: Reconfiguring daemon mon.vm05 on vm05 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm04.local:9093"}]: dispatch 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm04.local:3000"}]: dispatch 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm04.local:9095"}]: dispatch 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: Certificate for "grafana_cert (vm04)" is still valid for 1094 days. 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='client.? 
192.168.123.104:0/1549931029' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:07 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T14:13:08.186 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:08.324 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:08.757 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:08.806 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-15T14:13:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T14:13:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:13:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:13:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T14:13:08.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:08.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T14:13:08.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:08.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:08 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T14:13:09.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T14:13:09.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:09.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:09.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:09.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:13:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:13:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T14:13:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T14:13:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:08 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T14:13:09.750 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T14:13:09.750 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:09.750 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/1337238356' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:09.750 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: from='client.? 
192.168.123.105:0/3677709521' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a7d56dd6-5ea4-4ff2-ae38-e4e5ee1168fc"} : dispatch 2026-04-15T14:13:09.750 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a7d56dd6-5ea4-4ff2-ae38-e4e5ee1168fc"} : dispatch 2026-04-15T14:13:09.750 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a7d56dd6-5ea4-4ff2-ae38-e4e5ee1168fc"}]': finished 2026-04-15T14:13:09.750 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: osdmap e6: 1 total, 0 up, 1 in 2026-04-15T14:13:09.750 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:09.750 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/931275741' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "fc93bc36-2a43-45af-8a80-9f98b42d7ef3"} : dispatch 2026-04-15T14:13:09.751 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/931275741' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fc93bc36-2a43-45af-8a80-9f98b42d7ef3"}]': finished 2026-04-15T14:13:09.751 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: osdmap e7: 2 total, 0 up, 2 in 2026-04-15T14:13:09.751 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:09.751 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:09 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:09.807 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:09.817 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T14:13:09.817 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:09.817 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1337238356' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:09.817 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3677709521' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a7d56dd6-5ea4-4ff2-ae38-e4e5ee1168fc"} : dispatch 2026-04-15T14:13:09.817 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: from='client.? 
' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a7d56dd6-5ea4-4ff2-ae38-e4e5ee1168fc"} : dispatch 2026-04-15T14:13:09.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a7d56dd6-5ea4-4ff2-ae38-e4e5ee1168fc"}]': finished 2026-04-15T14:13:09.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: osdmap e6: 1 total, 0 up, 1 in 2026-04-15T14:13:09.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:09.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/931275741' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "fc93bc36-2a43-45af-8a80-9f98b42d7ef3"} : dispatch 2026-04-15T14:13:09.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/931275741' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fc93bc36-2a43-45af-8a80-9f98b42d7ef3"}]': finished 2026-04-15T14:13:09.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: osdmap e7: 2 total, 0 up, 2 in 2026-04-15T14:13:09.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:09.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:09 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:09.957 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:10.344 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:10.436 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1776262389,"num_remapped_pgs":0} 2026-04-15T14:13:10.857 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:10 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/3329876548' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:10.857 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:10 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/1065493756' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:10.857 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:10 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/58601760' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:10.935 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:10 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3329876548' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:10.935 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:10 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1065493756' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:10.935 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:10 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/58601760' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:11.437 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:11.576 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/2785908972' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "59fc7e9a-9099-4253-90bf-a48986abab86"} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "59fc7e9a-9099-4253-90bf-a48986abab86"} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "59fc7e9a-9099-4253-90bf-a48986abab86"}]': finished 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: osdmap e8: 3 total, 0 up, 3 in 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3369132953' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "156ad8a0-971a-4d3b-b8e3-a29bb406fc10"} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/3369132953' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "156ad8a0-971a-4d3b-b8e3-a29bb406fc10"}]': finished 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: osdmap e9: 4 total, 0 up, 4 in 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/2896362410' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:11.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:11 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3420430793' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/2785908972' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "59fc7e9a-9099-4253-90bf-a48986abab86"} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "59fc7e9a-9099-4253-90bf-a48986abab86"} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "59fc7e9a-9099-4253-90bf-a48986abab86"}]': finished 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: osdmap e8: 3 total, 0 up, 3 in 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='client.? 
192.168.123.104:0/3369132953' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "156ad8a0-971a-4d3b-b8e3-a29bb406fc10"} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3369132953' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "156ad8a0-971a-4d3b-b8e3-a29bb406fc10"}]': finished 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: osdmap e9: 4 total, 0 up, 4 in 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/2896362410' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:11 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3420430793' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:11.970 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:12.024 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1776262390,"num_remapped_pgs":0} 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/2372822117' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/3912989952' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "ee4fe9d8-9ee7-43ef-b4c1-1520b7ddce0a"} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "ee4fe9d8-9ee7-43ef-b4c1-1520b7ddce0a"} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ee4fe9d8-9ee7-43ef-b4c1-1520b7ddce0a"}]': finished 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: osdmap e10: 5 total, 0 up, 5 in 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/4212305058' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "93129f51-c10e-496f-a439-6e42f717437c"} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/4212305058' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "93129f51-c10e-496f-a439-6e42f717437c"}]': finished 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: osdmap e11: 6 total, 0 up, 6 in 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:12.771 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:12 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/2372822117' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3912989952' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "ee4fe9d8-9ee7-43ef-b4c1-1520b7ddce0a"} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "ee4fe9d8-9ee7-43ef-b4c1-1520b7ddce0a"} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ee4fe9d8-9ee7-43ef-b4c1-1520b7ddce0a"}]': finished 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: osdmap e10: 5 total, 0 up, 5 in 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/4212305058' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "93129f51-c10e-496f-a439-6e42f717437c"} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/4212305058' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "93129f51-c10e-496f-a439-6e42f717437c"}]': finished 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: osdmap e11: 6 total, 0 up, 6 in 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:12.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:12 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:13.025 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:13.174 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:13.553 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:13.608 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1776262392,"num_remapped_pgs":0} 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/3560788792' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/1305169875' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/609017945' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='client.? 
192.168.123.104:0/475341905' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "801ea36c-499d-4470-8775-037ff667b7b3"} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/3842907762' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "b8b34729-ecc2-43d4-a24f-afe5c06feaac"} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/475341905' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "801ea36c-499d-4470-8775-037ff667b7b3"}]': finished 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: osdmap e12: 7 total, 0 up, 7 in 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "b8b34729-ecc2-43d4-a24f-afe5c06feaac"} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b8b34729-ecc2-43d4-a24f-afe5c06feaac"}]': finished 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: osdmap e13: 8 total, 0 up, 8 in 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:13.666 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:13 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:13.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3560788792' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1305169875' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/609017945' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/475341905' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "801ea36c-499d-4470-8775-037ff667b7b3"} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3842907762' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "b8b34729-ecc2-43d4-a24f-afe5c06feaac"} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/475341905' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "801ea36c-499d-4470-8775-037ff667b7b3"}]': finished 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: osdmap e12: 7 total, 0 up, 7 in 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "b8b34729-ecc2-43d4-a24f-afe5c06feaac"} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b8b34729-ecc2-43d4-a24f-afe5c06feaac"}]': finished 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: osdmap e13: 8 total, 0 up, 8 in 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:13.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:13.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:13 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:14.609 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:14.773 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:14.893 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:14 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/324140431' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:14.893 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:14 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/2772259666' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:14.893 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:14 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:13:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:14 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/324140431' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:14 vm05 ceph-mon[57841]: from='client.? 
192.168.123.104:0/2772259666' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T14:13:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:14 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:13:15.265 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:15.327 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":0} 2026-04-15T14:13:16.034 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:15 vm05 ceph-mon[57841]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:16.034 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:15 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3525207602' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:16.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:15 vm04 ceph-mon[53345]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:16.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:15 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3525207602' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:16.328 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:16.487 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:16.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:16 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch 2026-04-15T14:13:16.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:16 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:16.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:16 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.1"} : dispatch 2026-04-15T14:13:16.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:16 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:16.872 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:16 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch 2026-04-15T14:13:16.872 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:16 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:16.872 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:16 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.1"} : dispatch 2026-04-15T14:13:16.872 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:16 vm05 
ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:16.886 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:16.960 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":0} 2026-04-15T14:13:17.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:17 vm05 ceph-mon[57841]: Deploying daemon osd.0 on vm05 2026-04-15T14:13:17.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:17 vm05 ceph-mon[57841]: Deploying daemon osd.1 on vm04 2026-04-15T14:13:17.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:17 vm05 ceph-mon[57841]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:17.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:17 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3000348834' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:17.961 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:17.990 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:17 vm04 ceph-mon[53345]: Deploying daemon osd.0 on vm05 2026-04-15T14:13:17.990 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:17 vm04 ceph-mon[53345]: Deploying daemon osd.1 on vm04 2026-04-15T14:13:17.990 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:17 vm04 ceph-mon[53345]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:17.990 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:17 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3000348834' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:18.126 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:18.581 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:18.645 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":0} 2026-04-15T14:13:18.800 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:18 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3820099853' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:18.845 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:18 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/3820099853' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:19.645 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:19.792 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.2"} : dispatch 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: from='osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308]' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]} : dispatch 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.3"} : dispatch 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:19.909 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:19 vm04 ceph-mon[53345]: from='osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560]' entity='osd.1' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]} : dispatch 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 ceph-mon[57841]: pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 
ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.2"} : dispatch 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 ceph-mon[57841]: from='osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308]' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]} : dispatch 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.3"} : dispatch 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:19.960 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:19 vm05 ceph-mon[57841]: from='osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560]' entity='osd.1' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]} : dispatch 2026-04-15T14:13:20.176 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:20.244 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":14,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":0} 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: Deploying daemon osd.2 on vm05 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: Deploying daemon osd.3 on vm04 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: osdmap e14: 8 total, 0 up, 8 in 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560]' entity='osd.1' cmd={"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 
0} : dispatch 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308]' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:20.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:20 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/2174862415' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: Deploying daemon osd.2 on vm05 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: Deploying daemon osd.3 on vm04 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: osdmap e14: 8 total, 0 up, 8 in 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560]' entity='osd.1' cmd={"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308]' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 
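The repeated "cephadm shell ... ceph osd stat -f json" invocations interleaved above are teuthology polling the cluster while cephadm deploys the eight OSDs: the captured JSON replies show num_osds climbing from 1 to 8 across osdmap epochs e6-e13 while num_up_osds stays 0 until the daemons finish starting. A minimal sketch of that wait loop, assuming a hypothetical run_cephadm_shell() helper in place of teuthology's orchestra remote runner (the fsid and image are taken from the command lines captured in the log):

    import json
    import subprocess
    import time

    FSID = "d89dc7c6-38d4-11f1-aa58-cd98464f39ae"
    IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5"

    def run_cephadm_shell(args):
        # Hypothetical stand-in for teuthology's remote runner; it builds the
        # same "cephadm shell" command line the DEBUG entries above show.
        cmd = [
            "sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
            "shell", "-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring",
            "--fsid", FSID, "--",
        ] + args
        return subprocess.check_output(cmd, text=True)

    def wait_for_osds(want, timeout=600, interval=1.5):
        # Poll "ceph osd stat -f json" until the expected number of OSDs
        # exist and report up; raises if the cluster never converges.
        deadline = time.time() + timeout
        while time.time() < deadline:
            stat = json.loads(run_cephadm_shell(["ceph", "osd", "stat", "-f", "json"]))
            if stat["num_osds"] >= want and stat["num_up_osds"] >= want:
                return stat
            time.sleep(interval)
        raise TimeoutError(f"OSDs not all up after {timeout}s")

With wait_for_osds(8), the loop would keep cycling through outputs like the epoch-15 stat above (num_up_osds still 0) and return only once all eight daemons report up.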
2026-04-15T14:13:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:20 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/2174862415' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:21.245 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:21.442 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: osdmap e15: 8 total, 0 up, 8 in 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 
ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:21.830 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:21 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:21.895 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: osdmap e15: 8 total, 0 up, 8 in 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:21.897 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:21 vm05 ceph-mon[57841]: from='mgr.14231 
192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:22.018 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":15,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":0} 2026-04-15T14:13:22.782 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: purged_snaps scrub starts 2026-04-15T14:13:22.782 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: purged_snaps scrub ok 2026-04-15T14:13:22.782 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: purged_snaps scrub starts 2026-04-15T14:13:22.782 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: purged_snaps scrub ok 2026-04-15T14:13:22.782 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/254670618' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:23.019 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.4"} : dispatch 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560]' entity='osd.1' 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308]' entity='osd.0' 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='osd.2 [v2:192.168.123.105:6808/2685562761,v1:192.168.123.105:6809/2685562761]' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]} : dispatch 2026-04-15T14:13:23.047 
INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.5"} : dispatch 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]} : dispatch 2026-04-15T14:13:23.047 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:22 vm05 ceph-mon[57841]: from='osd.3 [v2:192.168.123.104:6810/552697038,v1:192.168.123.104:6811/552697038]' entity='osd.3' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]} : dispatch 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: purged_snaps scrub starts 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: purged_snaps scrub ok 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: purged_snaps scrub starts 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: purged_snaps scrub ok 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/254670618' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.4"} : dispatch 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560]' entity='osd.1' 2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308]' entity='osd.0' 
2026-04-15T14:13:23.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:23.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='osd.2 [v2:192.168.123.105:6808/2685562761,v1:192.168.123.105:6809/2685562761]' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]} : dispatch 2026-04-15T14:13:23.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:23.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.5"} : dispatch 2026-04-15T14:13:23.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:23.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]} : dispatch 2026-04-15T14:13:23.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:22 vm04 ceph-mon[53345]: from='osd.3 [v2:192.168.123.104:6810/552697038,v1:192.168.123.104:6811/552697038]' entity='osd.3' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]} : dispatch 2026-04-15T14:13:23.177 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:23.587 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:23.663 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":16,"num_osds":8,"num_up_osds":2,"osd_up_since":1776262403,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":0} 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: Deploying daemon osd.4 on vm05 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: Deploying daemon osd.5 on vm04 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='osd.3 [v2:192.168.123.104:6810/552697038,v1:192.168.123.104:6811/552697038]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: osd.1 
[v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560] boot 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308] boot 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: osdmap e16: 8 total, 2 up, 8 in 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='osd.3 [v2:192.168.123.104:6810/552697038,v1:192.168.123.104:6811/552697038]' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='osd.2 [v2:192.168.123.105:6808/2685562761,v1:192.168.123.105:6809/2685562761]' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:23 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/2719904349' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: Deploying daemon osd.4 on vm05 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: Deploying daemon osd.5 on vm04 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='osd.3 [v2:192.168.123.104:6810/552697038,v1:192.168.123.104:6811/552697038]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: osd.1 [v2:192.168.123.104:6802/2939922560,v1:192.168.123.104:6803/2939922560] boot 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: osd.0 [v2:192.168.123.105:6800/1390887308,v1:192.168.123.105:6801/1390887308] boot 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: osdmap e16: 8 total, 2 up, 8 in 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='osd.3 [v2:192.168.123.104:6810/552697038,v1:192.168.123.104:6811/552697038]' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 
ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='osd.2 [v2:192.168.123.105:6808/2685562761,v1:192.168.123.105:6809/2685562761]' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:23 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/2719904349' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:24.664 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:24.813 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:25.234 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:25.281 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: pgmap v24: 0 pgs: ; 0 B data, 453 MiB used, 40 GiB / 40 GiB avail 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='osd.3 [v2:192.168.123.104:6810/552697038,v1:192.168.123.104:6811/552697038]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: osdmap e17: 8 total, 2 up, 8 in 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:25.282 
INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='osd.2 ' entity='osd.2' 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.7"} : dispatch 2026-04-15T14:13:25.282 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:25 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:25.298 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":18,"num_osds":8,"num_up_osds":4,"osd_up_since":1776262405,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":0} 2026-04-15T14:13:25.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: pgmap v24: 0 pgs: ; 0 B data, 453 MiB used, 40 GiB / 40 GiB avail 2026-04-15T14:13:25.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='osd.3 [v2:192.168.123.104:6810/552697038,v1:192.168.123.104:6811/552697038]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-15T14:13:25.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-04-15T14:13:25.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: osdmap e17: 8 total, 2 up, 8 in 2026-04-15T14:13:25.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:25.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": 
"osd metadata", "id": 3} : dispatch 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='osd.2 ' entity='osd.2' 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.7"} : dispatch 2026-04-15T14:13:25.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:25 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:26.257 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: purged_snaps scrub starts 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: purged_snaps scrub ok 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: purged_snaps scrub starts 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: purged_snaps scrub ok 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: Deploying daemon osd.7 on vm05 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: osd.3 
[v2:192.168.123.104:6810/552697038,v1:192.168.123.104:6811/552697038] boot 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: osd.2 [v2:192.168.123.105:6808/2685562761,v1:192.168.123.105:6809/2685562761] boot 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: osdmap e18: 8 total, 4 up, 8 in 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='client.? 
192.168.123.104:0/955297783' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='osd.4 [v2:192.168.123.105:6816/4184869825,v1:192.168.123.105:6817/4184869825]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.6"} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: Deploying daemon osd.6 on vm04 2026-04-15T14:13:26.258 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:26 vm05 ceph-mon[57841]: from='osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349]' entity='osd.5' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]} : dispatch 2026-04-15T14:13:26.299 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:26.327 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: purged_snaps scrub starts 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: purged_snaps scrub ok 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: purged_snaps scrub starts 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: purged_snaps scrub ok 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: Deploying daemon osd.7 on vm05 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: osd.3 [v2:192.168.123.104:6810/552697038,v1:192.168.123.104:6811/552697038] boot 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 
ceph-mon[53345]: osd.2 [v2:192.168.123.105:6808/2685562761,v1:192.168.123.105:6809/2685562761] boot 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: osdmap e18: 8 total, 4 up, 8 in 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/955297783' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='osd.4 [v2:192.168.123.105:6816/4184869825,v1:192.168.123.105:6817/4184869825]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "osd.6"} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: Deploying daemon osd.6 on vm04 2026-04-15T14:13:26.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:26 vm04 ceph-mon[53345]: from='osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349]' entity='osd.5' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": 
["5"]} : dispatch 2026-04-15T14:13:26.446 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:26.975 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:27.040 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":19,"num_osds":8,"num_up_osds":4,"osd_up_since":1776262405,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":0} 2026-04-15T14:13:27.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: pgmap v27: 0 pgs: ; 0 B data, 453 MiB used, 40 GiB / 40 GiB avail 2026-04-15T14:13:27.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-04-15T14:13:27.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-04-15T14:13:27.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: osdmap e19: 8 total, 4 up, 8 in 2026-04-15T14:13:27.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349]' entity='osd.5' cmd={"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-15T14:13:27.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:27.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:27.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:27.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:27.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='osd.4 [v2:192.168.123.105:6816/4184869825,v1:192.168.123.105:6817/4184869825]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:27.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:27.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} : dispatch 2026-04-15T14:13:27.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:27 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/2814785275' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:27.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: pgmap v27: 0 pgs: ; 0 B data, 453 MiB used, 40 GiB / 40 GiB avail 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: osdmap e19: 8 total, 4 up, 8 in 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349]' entity='osd.5' cmd={"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='osd.4 [v2:192.168.123.105:6816/4184869825,v1:192.168.123.105:6817/4184869825]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} : dispatch 2026-04-15T14:13:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:27 vm05 ceph-mon[57841]: from='client.? 
192.168.123.104:0/2814785275' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:28.040 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:28.209 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:28.331 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-15T14:13:28.331 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-04-15T14:13:28.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-04-15T14:13:28.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: osdmap e20: 8 total, 4 up, 8 in 2026-04-15T14:13:28.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:28.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:28.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:28.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:28.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true} : dispatch 2026-04-15T14:13:28.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:28.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:28.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:28 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 
vm05 ceph-mon[57841]: from='osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: osdmap e20: 8 total, 4 up, 8 in 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true} : dispatch 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:28.394 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:28 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:28.606 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:28.670 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":21,"num_osds":8,"num_up_osds":4,"osd_up_since":1776262405,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":0} 2026-04-15T14:13:29.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 sudo[79293]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-04-15T14:13:29.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 sudo[79293]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-04-15T14:13:29.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 sudo[79293]: 
pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-04-15T14:13:29.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 sudo[79293]: pam_unix(sudo:session): session closed for user root 2026-04-15T14:13:29.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: purged_snaps scrub starts 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: purged_snaps scrub ok 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: purged_snaps scrub starts 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: purged_snaps scrub ok 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: pgmap v30: 1 pgs: 1 unknown; 0 B data, 905 MiB used, 79 GiB / 80 GiB avail 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: osdmap e21: 8 total, 4 up, 8 in 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='osd.4 ' entity='osd.4' 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349]' entity='osd.5' 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='osd.7 [v2:192.168.123.105:6824/266427133,v1:192.168.123.105:6825/266427133]' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]} : dispatch 2026-04-15T14:13:29.367 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/4255111783' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='osd.6 [v2:192.168.123.104:6826/3842176150,v1:192.168.123.104:6827/3842176150]' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: 
from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:29.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:29 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 sudo[68403]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 sudo[68403]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 sudo[68403]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 sudo[68403]: pam_unix(sudo:session): session closed for user root 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: purged_snaps scrub starts 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: purged_snaps scrub ok 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: purged_snaps scrub starts 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: purged_snaps scrub ok 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: pgmap v30: 1 pgs: 1 unknown; 0 B data, 905 MiB used, 79 GiB / 80 GiB avail 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: osdmap e21: 8 total, 4 up, 8 in 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 
192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='osd.4 ' entity='osd.4' 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349]' entity='osd.5' 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='osd.7 [v2:192.168.123.105:6824/266427133,v1:192.168.123.105:6825/266427133]' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/4255111783' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='osd.6 [v2:192.168.123.104:6826/3842176150,v1:192.168.123.104:6827/3842176150]' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 
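[annotation] The 'osd crush set-device-class' dispatches above are each booting OSD registering its device class (hdd) with the monitors. A minimal sketch of how one could verify those registrations landed, by parsing 'ceph osd tree --format=json' — this check is not part of the run; it assumes host access to an admin keyring, and the expected "hdd" class is taken from the dispatches logged above:

#!/usr/bin/env python3
# Sketch only: confirm every OSD in the CRUSH tree carries the "hdd"
# device class that the 'osd crush set-device-class' commands above set.
import json
import subprocess

def osd_device_classes():
    out = subprocess.check_output(["ceph", "osd", "tree", "--format=json"])
    tree = json.loads(out)
    # CRUSH tree nodes of type "osd" carry a "device_class" field.
    return {n["id"]: n.get("device_class")
            for n in tree["nodes"] if n["type"] == "osd"}

if __name__ == "__main__":
    missing = [osd for osd, cls in osd_device_classes().items()
               if cls != "hdd"]
    print("osds without hdd class:", missing)
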
2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "mon metadata", "id": "vm05"} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:29.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:29 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:29.671 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:29.843 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:30.239 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:30.318 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":22,"num_osds":8,"num_up_osds":6,"osd_up_since":1776262409,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":0} 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: osd.4 [v2:192.168.123.105:6816/4184869825,v1:192.168.123.105:6817/4184869825] boot 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349] boot 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: osdmap e22: 8 total, 6 up, 8 in 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='mgr.14231 
192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='osd.6 [v2:192.168.123.104:6826/3842176150,v1:192.168.123.104:6827/3842176150]' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-15T14:13:30.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-15T14:13:30.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='osd.7 [v2:192.168.123.105:6824/266427133,v1:192.168.123.105:6825/266427133]' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:30.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:30.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:13:30.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:30.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:30.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:30 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/3514125658' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:30.657 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: osd.4 [v2:192.168.123.105:6816/4184869825,v1:192.168.123.105:6817/4184869825] boot 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: osd.5 [v2:192.168.123.104:6818/3514888349,v1:192.168.123.104:6819/3514888349] boot 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: osdmap e22: 8 total, 6 up, 8 in 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='osd.6 [v2:192.168.123.104:6826/3842176150,v1:192.168.123.104:6827/3842176150]' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='osd.7 [v2:192.168.123.105:6824/266427133,v1:192.168.123.105:6825/266427133]' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]} : dispatch 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:30.658 
INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:30.658 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:30 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3514125658' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:31.319 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:31.496 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:31.576 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: pgmap v33: 1 pgs: 1 unknown; 0 B data, 506 MiB used, 79 GiB / 80 GiB avail 2026-04-15T14:13:31.576 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: osdmap e23: 8 total, 6 up, 8 in 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: mgrmap e19: vm04.ycniad(active, since 46s), standbys: vm05.ozgwuj 2026-04-15T14:13:31.577 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:31.577 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:31 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: pgmap v33: 1 pgs: 1 unknown; 0 B data, 506 MiB used, 79 GiB / 80 GiB avail 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: osdmap e23: 8 total, 6 up, 8 in 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: mgrmap e19: vm04.ycniad(active, since 46s), standbys: vm05.ozgwuj 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:31.602 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:31 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:31.904 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:31.979 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":24,"num_osds":8,"num_up_osds":6,"osd_up_since":1776262409,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":1} 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: purged_snaps scrub starts 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: purged_snaps scrub ok 2026-04-15T14:13:32.616 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: purged_snaps scrub starts 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: purged_snaps scrub ok 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: osdmap e24: 8 total, 6 up, 8 in 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='osd.7 ' entity='osd.7' 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='osd.6 ' entity='osd.6' 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1487264327' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:32.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:32 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: purged_snaps scrub starts 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: purged_snaps scrub ok 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: purged_snaps scrub starts 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: purged_snaps scrub ok 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:32.691 
INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: osdmap e24: 8 total, 6 up, 8 in 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='osd.7 ' entity='osd.7' 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='osd.6 ' entity='osd.6' 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/1487264327' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:32.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:32 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:32.980 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd stat -f json 2026-04-15T14:13:33.118 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:33.522 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:33.590 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":26,"num_osds":8,"num_up_osds":8,"osd_up_since":1776262412,"num_in_osds":8,"osd_in_since":1776262393,"num_remapped_pgs":1} 2026-04-15T14:13:33.590 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd dump --format=json 2026-04-15T14:13:33.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:33 vm05 ceph-mon[57841]: pgmap v36: 1 pgs: 1 unknown; 0 B data, 960 MiB used, 119 GiB / 120 GiB avail 2026-04-15T14:13:33.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:33 vm05 ceph-mon[57841]: osd.7 [v2:192.168.123.105:6824/266427133,v1:192.168.123.105:6825/266427133] boot 2026-04-15T14:13:33.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:33 vm05 ceph-mon[57841]: osd.6 [v2:192.168.123.104:6826/3842176150,v1:192.168.123.104:6827/3842176150] boot 2026-04-15T14:13:33.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:33 vm05 ceph-mon[57841]: osdmap e25: 8 total, 8 up, 8 in 
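[annotation] The repeated 'ceph osd stat -f json' invocations around this point are the runner polling until every OSD is up (num_up_osds climbs 4 -> 6 -> 8 across epochs e21-e25). A sketch of that wait loop, assuming the same cephadm wrapper as the logged commands — the image, fsid, and cephadm path are copied from this run and would differ elsewhere; the timeout and interval values are assumptions:

#!/usr/bin/env python3
# Sketch only: poll 'ceph osd stat -f json' via cephadm shell until
# num_up_osds == num_osds, mirroring the wait visible in the log above.
import json
import subprocess
import time

CMD = [
    "sudo", "/home/ubuntu/cephtest/cephadm",
    "--image", "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5",
    "shell", "--fsid", "d89dc7c6-38d4-11f1-aa58-cd98464f39ae",
    "--", "ceph", "osd", "stat", "-f", "json",
]

def wait_all_osds_up(timeout=300, interval=2):
    deadline = time.time() + timeout
    while time.time() < deadline:
        stat = json.loads(subprocess.check_output(CMD))
        if stat["num_up_osds"] == stat["num_osds"]:
            return stat  # e.g. {"num_osds": 8, "num_up_osds": 8, ...}
        time.sleep(interval)
    raise TimeoutError("OSDs never all came up")
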
2026-04-15T14:13:33.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:33 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:33.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:33 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:33.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:33 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:33.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:33 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:33.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:33 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"} : dispatch 2026-04-15T14:13:33.724 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:33.748 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:33 vm04 ceph-mon[53345]: pgmap v36: 1 pgs: 1 unknown; 0 B data, 960 MiB used, 119 GiB / 120 GiB avail 2026-04-15T14:13:33.748 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:33 vm04 ceph-mon[53345]: osd.7 [v2:192.168.123.105:6824/266427133,v1:192.168.123.105:6825/266427133] boot 2026-04-15T14:13:33.748 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:33 vm04 ceph-mon[53345]: osd.6 [v2:192.168.123.104:6826/3842176150,v1:192.168.123.104:6827/3842176150] boot 2026-04-15T14:13:33.748 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:33 vm04 ceph-mon[53345]: osdmap e25: 8 total, 8 up, 8 in 2026-04-15T14:13:33.748 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:33 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T14:13:33.748 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:33 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T14:13:33.748 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:33 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:33.748 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:33 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:33.748 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:33 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"} : dispatch 2026-04-15T14:13:34.084 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:34.084 
INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":26,"fsid":"d89dc7c6-38d4-11f1-aa58-cd98464f39ae","created":"2026-04-15T14:11:13.007357+0000","modified":"2026-04-15T14:13:33.373100+0000","last_up_change":"2026-04-15T14:13:32.369217+0000","last_in_change":"2026-04-15T14:13:13.632102+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":10,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-15T14:13:26.340803+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"22","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"a7d56dd6-5ea4-4ff2-ae38-e4e5ee1168fc","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":25,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6800","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6801","nonce":1390887308}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6802","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6803","nonce":1390887308}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6806","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6807","nonce":1390887308}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6804","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6805","nonce":1390887308}]},"public_addr":"192.168.123.105:6801/1390887308","cluster_addr":"192.168.123.105:6803/1390887308","heartbeat_back_addr":"192.168.123.105:6807/1390887308","heartbeat_front_addr":"192.168.123.105:6805/1390887308","state":["exists","up"]},{"osd":1,"uuid":"fc93bc36-2a43-45af-8a80-9f98b42d7ef3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6803","nonce":2939922560}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6805","nonce":2939922560}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6809","nonce":2939922560}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6807","nonce":2939922560}]},"public_addr":"192.168.123.104:6803/2939922560","cluster_addr":"192.168.123.104:6805/2939922560","heartbeat_back_addr":"192.168.123.104:6809/2939922560","heartbeat_front_addr":"192.168.123.104:6807/2939922560","state":["exists","up"]},{"osd":2,"uuid":"59fc7e9a-9099-4253-90bf-a48986abab86","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":20,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6808","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6809","nonce":2685562761}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6810","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6811","nonce":2685562761}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6814","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6815","nonce":2685562761}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6812","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6813","nonce":2685562761}]},"public_addr":"192.168.123.105:6809/2685562761","cluster_addr":"192.168.123.105:6811/2685562761","heartbeat_back_addr":"192.168.123.105:6815/2685562761","heartbeat_front_addr":"192.168.123.105:6813/2685562761","state":["exists","up"]},{"osd":3,"uuid":"156ad8a0-971a-4d3b-b8e3-a29bb406fc10","up":1,"in":1,"weight":1,"primary_affinit
y":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6811","nonce":552697038}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6813","nonce":552697038}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6816","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6817","nonce":552697038}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6815","nonce":552697038}]},"public_addr":"192.168.123.104:6811/552697038","cluster_addr":"192.168.123.104:6813/552697038","heartbeat_back_addr":"192.168.123.104:6817/552697038","heartbeat_front_addr":"192.168.123.104:6815/552697038","state":["exists","up"]},{"osd":4,"uuid":"ee4fe9d8-9ee7-43ef-b4c1-1520b7ddce0a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6816","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6817","nonce":4184869825}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6818","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6819","nonce":4184869825}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6822","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6823","nonce":4184869825}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6820","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6821","nonce":4184869825}]},"public_addr":"192.168.123.105:6817/4184869825","cluster_addr":"192.168.123.105:6819/4184869825","heartbeat_back_addr":"192.168.123.105:6823/4184869825","heartbeat_front_addr":"192.168.123.105:6821/4184869825","state":["exists","up"]},{"osd":5,"uuid":"93129f51-c10e-496f-a439-6e42f717437c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":23,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6818","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6819","nonce":3514888349}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6820","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6821","nonce":3514888349}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6824","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6825","nonce":3514888349}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6822","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6823","nonce":3514888349}]},"public_addr":"192.168.123.104:6819/3514888349","cluster_addr":"192.168.123.104:6821/3514888349","heartbeat_back_addr":"192.168.123.104:6825/3514888349","heartbeat_front_addr":"192.168.123.104:6823/3514888349","state":["exists","up"]},{"osd":6,"uuid":"801ea36c-499d-4470-8775-037ff667b7b3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6826","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:6827","nonce":3842176150}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6828","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:
6829","nonce":3842176150}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6832","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:6833","nonce":3842176150}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6830","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:6831","nonce":3842176150}]},"public_addr":"192.168.123.104:6827/3842176150","cluster_addr":"192.168.123.104:6829/3842176150","heartbeat_back_addr":"192.168.123.104:6833/3842176150","heartbeat_front_addr":"192.168.123.104:6831/3842176150","state":["exists","up"]},{"osd":7,"uuid":"b8b34729-ecc2-43d4-a24f-afe5c06feaac","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6824","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6825","nonce":266427133}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6826","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6827","nonce":266427133}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6830","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6831","nonce":266427133}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6828","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6829","nonce":266427133}]},"public_addr":"192.168.123.105:6825/266427133","cluster_addr":"192.168.123.105:6827/266427133","heartbeat_back_addr":"192.168.123.105:6831/266427133","heartbeat_front_addr":"192.168.123.105:6829/266427133","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:20.136975+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:20.412493+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:23.293505+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:23.463722+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:26.459395+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:26.747709+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:29.844429+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:29.386179+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[{"pgid":"1.0","osds":[0,2]}],"primary_temp":[],"blocklist":{"192.168.123.104:6801/322276771":"2026-04-16T14:12:44.048990+0000","192.168.123.104:0/1546778517":"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/1070793896":"2026-04-16T14:11:39.957718+0000","192.
168.123.104:6801/8060406":"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/3915671577":"2026-04-16T14:12:02.850399+0000","192.168.123.104:6800/8060406":"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/1128291451":"2026-04-16T14:12:44.048990+0000","192.168.123.104:6800/322276771":"2026-04-16T14:12:44.048990+0000","192.168.123.104:0/692560052":"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/615164344":"2026-04-16T14:12:02.850399+0000","192.168.123.104:6800/3792063836":"2026-04-16T14:12:02.850399+0000","192.168.123.104:6801/3792063836":"2026-04-16T14:12:02.850399+0000","192.168.123.104:0/3225569276":"2026-04-16T14:12:02.850399+0000","192.168.123.104:0/1245479587":"2026-04-16T14:12:44.048990+0000","192.168.123.104:0/1116364314":"2026-04-16T14:12:44.048990+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-15T14:13:34.134 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-04-15T14:13:26.340803+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '22', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'nonprimary_shards': '{}', 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 7.889999866485596, 'score_stable': 7.889999866485596, 'optimal_score': 0.3799999952316284, 'raw_score_acting': 3, 'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-04-15T14:13:34.134 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd pool 
get .mgr pg_num 2026-04-15T14:13:34.282 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: Detected new or changed devices on vm05 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: osdmap e26: 8 total, 8 up, 8 in 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/4137344589' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: Detected new or changed devices on vm04 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:13:34.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:34 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3889800329' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T14:13:34.681 INFO:teuthology.orchestra.run.vm04.stdout:pg_num: 1 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: Detected new or changed devices on vm05 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: osdmap e26: 8 total, 8 up, 8 in 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: from='client.? 
192.168.123.104:0/4137344589' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: Detected new or changed devices on vm04 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:13:34.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:34 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3889800329' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T14:13:34.743 INFO:tasks.cephadm:Setting up client nodes... 
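[annotation] The client-node setup that follows is a three-step pattern per client: mint a key with 'ceph auth get-or-create client.N' carrying allow-all caps, write it to /etc/ceph/ceph.client.N.keyring (the log does this with 'sudo dd of=...'), then chmod it 0644. A sketch of the same steps, assuming it runs as root on a node with an admin keyring; setup_client is a hypothetical helper name, not a teuthology function:

#!/usr/bin/env python3
# Sketch only: mirror the auth get-or-create / dd / chmod sequence the
# runner performs below for client.0 and client.1.
import subprocess

def setup_client(client_id):
    caps = ["mon", "allow *", "osd", "allow *",
            "mds", "allow *", "mgr", "allow *"]
    keyring = subprocess.check_output(
        ["ceph", "auth", "get-or-create", f"client.{client_id}", *caps])
    path = f"/etc/ceph/ceph.client.{client_id}.keyring"
    with open(path, "wb") as f:   # stands in for 'sudo dd of=<path>'
        f.write(keyring)
    subprocess.check_call(["chmod", "0644", path])

if __name__ == "__main__":
    setup_client(0)
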
2026-04-15T14:13:34.743 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-04-15T14:13:34.893 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:13:35.332 INFO:teuthology.orchestra.run.vm04.stdout:[client.0]
2026-04-15T14:13:35.332 INFO:teuthology.orchestra.run.vm04.stdout: key = AQAPnd9pvLyVExAAIy4b2YI4s+FzRNqDYiCTvQ==
2026-04-15T14:13:35.388 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-04-15T14:13:35.388 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.client.0.keyring
2026-04-15T14:13:35.388 DEBUG:teuthology.orchestra.run.vm04:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring
2026-04-15T14:13:35.428 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-04-15T14:13:35.583 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm05/config
2026-04-15T14:13:35.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:35 vm04 ceph-mon[53345]: pgmap v39: 1 pgs: 1 active+undersized+remapped; 577 KiB data, 1.4 GiB used, 159 GiB / 160 GiB avail
2026-04-15T14:13:35.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:35 vm04 ceph-mon[53345]: osdmap e27: 8 total, 8 up, 8 in
2026-04-15T14:13:35.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:35 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/4019926341' entity='client.admin' cmd={"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"} : dispatch
2026-04-15T14:13:35.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:35 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1095924860' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch
2026-04-15T14:13:35.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:35 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1095924860' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-04-15T14:13:35.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:35 vm05 ceph-mon[57841]: pgmap v39: 1 pgs: 1 active+undersized+remapped; 577 KiB data, 1.4 GiB used, 159 GiB / 160 GiB avail
2026-04-15T14:13:35.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:35 vm05 ceph-mon[57841]: osdmap e27: 8 total, 8 up, 8 in
2026-04-15T14:13:35.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:35 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/4019926341' entity='client.admin' cmd={"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"} : dispatch
2026-04-15T14:13:35.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:35 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/1095924860' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch
2026-04-15T14:13:35.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:35 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/1095924860' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-04-15T14:13:36.015 INFO:teuthology.orchestra.run.vm05.stdout:[client.1]
2026-04-15T14:13:36.015 INFO:teuthology.orchestra.run.vm05.stdout: key = AQAQnd9pn6adABAA/quHlW4N89Bt+LtdWJhVmA==
2026-04-15T14:13:36.067 DEBUG:teuthology.orchestra.run.vm05:> set -ex
2026-04-15T14:13:36.067 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/ceph/ceph.client.1.keyring
2026-04-15T14:13:36.067 DEBUG:teuthology.orchestra.run.vm05:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring
2026-04-15T14:13:36.110 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph config log 1 --format=json
2026-04-15T14:13:36.247 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:13:36.561 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:36 vm04 ceph-mon[53345]: osdmap e28: 8 total, 8 up, 8 in
2026-04-15T14:13:36.561 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:36 vm04 ceph-mon[53345]: from='client.? 192.168.123.105:0/3699859802' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch
2026-04-15T14:13:36.561 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:36 vm04 ceph-mon[53345]: from='client.? ' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch
2026-04-15T14:13:36.561 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:36 vm04 ceph-mon[53345]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-04-15T14:13:36.625 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:13:36.680 INFO:teuthology.orchestra.run.vm04.stdout:[{"version":20,"timestamp":"2026-04-15T14:13:31.454526+0000","name":"","changes":[{"name":"osd.6/osd_mclock_max_capacity_iops_hdd","new_value":"27661.978078"}]}]
2026-04-15T14:13:36.680 INFO:tasks.ceph_manager:config epoch is 20
2026-04-15T14:13:36.680 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean...
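The `ceph config log 1 --format=json` call above is how the harness discovers the newest config epoch (version 20 in this run), and the `ceph mgr dump --format=json` issued next is polled until the cluster reports an available active mgr. A minimal sketch of both checks, assuming jq is installed on the host; the CEPHADM/IMAGE/FSID variables are just shorthand for the values shown in this log:

    CEPHADM=/home/ubuntu/cephtest/cephadm
    IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5
    FSID=d89dc7c6-38d4-11f1-aa58-cd98464f39ae

    # Newest config epoch: `ceph config log 1` returns only the single
    # most recent entry, so take its version field (prints 20 here).
    sudo $CEPHADM --image $IMAGE shell --fsid $FSID -- \
        ceph config log 1 --format=json | jq '.[0].version'

    # Block until the active mgr reports available, as the harness does;
    # `jq -e` exits non-zero while "available" is still false.
    until sudo $CEPHADM --image $IMAGE shell --fsid $FSID -- \
        ceph mgr dump --format=json | jq -e '.available' >/dev/null; do
        sleep 5
    done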
2026-04-15T14:13:36.680 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available
2026-04-15T14:13:36.680 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph mgr dump --format=json
2026-04-15T14:13:36.819 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:13:36.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:36 vm05 ceph-mon[57841]: osdmap e28: 8 total, 8 up, 8 in
2026-04-15T14:13:36.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:36 vm05 ceph-mon[57841]: from='client.? 192.168.123.105:0/3699859802' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch
2026-04-15T14:13:36.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:36 vm05 ceph-mon[57841]: from='client.? ' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch
2026-04-15T14:13:36.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:36 vm05 ceph-mon[57841]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-04-15T14:13:37.234 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:13:37.315 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":19,"flags":0,"active_gid":14231,"active_name":"vm04.ycniad","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6800","nonce":514871975},{"type":"v1","addr":"192.168.123.104:6801","nonce":514871975}]},"active_addr":"192.168.123.104:6801/514871975","active_change":"2026-04-15T14:12:44.049095+0000","active_mgr_features":4541880224203014143,"available":true,"standbys":[{"gid":14264,"name":"vm05.ozgwuj","mgr_features":4541880224203014143,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to, use commas to separate
multiple","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less 
aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per 
attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"certificate_automated_rotation_enabled":{"name":"certificate_automated_rotation_enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"This flag controls whether cephadm automatically rotates certificates upon expiration.","long_desc":"","tags":[],"see_also":[]},"certificate_check_debug_mode":{"name":"certificate_check_debug_mode","type":"bool","level":"dev","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"FOR TESTING ONLY: This flag forces the certificate check instead of waiting for certificate_check_period.","long_desc":"","tags":[],"see_also":[]},"certificate_check_period":{"name":"certificate_check_period","type":"int","level":"advanced","flags":0,"default_value":"1","min":"0","max":"30","enum_allowed":[],"desc":"Specifies how often (in days) the certificate should be checked for validity.","long_desc":"","tags":[],"see_also":[]},"certificate_duration_days":{"name":"certificate_duration_days","type":"int","level":"advanced","flags":0,"default_value":"1095","min":"90","max":"3650","enum_allowed":[],"desc":"Specifies the duration of self certificates generated and signed by cephadm root 
CA","long_desc":"","tags":[],"see_also":[]},"certificate_renewal_threshold_days":{"name":"certificate_renewal_threshold_days","type":"int","level":"advanced","flags":0,"default_value":"30","min":"10","max":"90","enum_allowed":[],"desc":"Specifies the lead time in days to initiate certificate renewal before expiration.","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.28.1","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"Elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:12.2.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"Haproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived 
container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_nginx":{"name":"container_image_nginx","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nginx:sclorg-nginx-126","min":"","max":"","enum_allowed":[],"desc":"Nginx container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.9.1","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.5","min":"","max":"","enum_allowed":[],"desc":"Nvmeof container image","long_desc":"","tags":[],"see_also":[]},"container_image_oauth2_proxy":{"name":"container_image_oauth2_proxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/oauth2-proxy/oauth2-proxy:v7.6.0","min":"","max":"","enum_allowed":[],"desc":"Oauth2 proxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v3.6.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba_metrics":{"name":"container_image_samba_metrics","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-metrics:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba metrics container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"Snmp gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in 
seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"stray_daemon_check_interval":{"name":"stray_daemon_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"how frequently cephadm should check for the presence of stray daemons","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MANAGED_BY_CLUSTERS":{"name":"MANAGED_BY_CLUSTERS","type":"str","level":"advanced","flags":0,"default_value":"[]","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MULTICLUSTER_CONFIG":{"name":"MULTICLUSTER_CONFIG","type":"str","level":"advanced","flags":0,"default_value":"{}","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"Fal
se","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_HOSTNAME_PER_DAEMON":{"name":"RGW_HOSTNAME_PER_DAEMON","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"UNSAFE_TLS_v1_2":{"name":"UNSAFE_TLS_v1_2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"
desc":"Enable/disable debug options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sso_oauth2":{"name":"sso_oauth2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":
"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current `PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_als
o":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"prometheus_tls_secret_name":{"name":"prometheus_tls_secret_name","type":"str","level":"advanced","flags":0,"default_value":"rook-ceph-prometheus-server-tls","min":"","max":"","enum_allowed":[],"desc":"name of tls secret in k8s for prometheus","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered 
PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"smb","can_run":true,"error_string":"","module_options":{"internal_store_backend":{"name":"internal_store_backend","type":"str","level":"dev","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"set 
internal store backend. for development and testing only","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_orchestration":{"name":"update_orchestration","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically update orchestration when smb resources are changed","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack traces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_cloning":{"name":"pause_cloning","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_purging":{"name":"pause_purging","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous subvolume purge threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to, use commas to separate multiple","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate 
as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs is within this count, then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this option can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"certificate_automated_rotation_enabled":{"name":"certificate_automated_rotation_enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"This flag controls whether cephadm automatically rotates certificates upon expiration.","long_desc":"","tags":[],"see_also":[]},"certificate_check_debug_mode":{"name":"certificate_check_debug_mode","type":"bool","level":"dev","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"FOR TESTING ONLY: This flag forces the certificate check instead of waiting for certificate_check_period.","long_desc":"","tags":[],"see_also":[]},"certificate_check_period":{"name":"certificate_check_period","type":"int","level":"advanced","flags":0,"default_value":"1","min":"0","max":"30","enum_allowed":[],"desc":"Specifies how often (in days) the certificate should be checked for validity.","long_desc":"","tags":[],"see_also":[]},"certificate_duration_days":{"name":"certificate_duration_days","type":"int","level":"advanced","flags":0,"default_value":"1095","min":"90","max":"3650","enum_allowed":[],"desc":"Specifies the duration of self certificates generated and signed by cephadm root CA","long_desc":"","tags":[],"see_also":[]},"certificate_renewal_threshold_days":{"name":"certificate_renewal_threshold_days","type":"int","level":"advanced","flags":0,"default_value":"30","min":"10","max":"90","enum_allowed":[],"desc":"Specifies the lead time in days to initiate certificate renewal before expiration.","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman 
only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.28.1","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"Elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:12.2.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"Haproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_nginx":{"name":"container_image_nginx","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nginx:sclorg-nginx-126","min":"","max":"","enum_allowed":[],"desc":"Nginx container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.9.1","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.5","min":"","max":"","enum_allowed":[],"desc":"Nvmeof container image","long_desc":"","tags":[],"see_also":[]},"container_image_oauth2_proxy":{"name":"container_image_oauth2_proxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/oauth2-proxy/oauth2-proxy:v7.6.0","min":"","max":"","enum_allowed":[],"desc":"Oauth2 proxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v3.6.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba_metrics":{"name":"container_image_samba_metrics","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-metrics:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba metrics container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"Snmp gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password.
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"stray_daemon_check_interval":{"name":"stray_daemon_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"how frequently cephadm should check for the presence of stray daemons","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MANAGED_BY_CLUSTERS":{"name":"MANAGED_BY_CLUSTERS","type":"str","level":"advanced","flags":0,"default_value":"[]","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MULTICLUSTER_CONFIG":{"name":"MULTICLUSTER_CONFIG","type":"str","level":"advanced","flags":0,"default_value":"{}","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"Fal
se","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_HOSTNAME_PER_DAEMON":{"name":"RGW_HOSTNAME_PER_DAEMON","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"UNSAFE_TLS_v1_2":{"name":"UNSAFE_TLS_v1_2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"
desc":"Enable/disable debug options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sso_oauth2":{"name":"sso_oauth2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":
"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_als
o":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"prometheus_tls_secret_name":{"name":"prometheus_tls_secret_name","type":"str","level":"advanced","flags":0,"default_value":"rook-ceph-prometheus-server-tls","min":"","max":"","enum_allowed":[],"desc":"name of tls secret in k8s for prometheus","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered 
PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"smb","can_run":true,"error_string":"","module_options":{"internal_store_backend":{"name":"internal_store_backend","type":"str","level":"dev","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"set 
internal store backend. for develoment and testing only","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_orchestration":{"name":"update_orchestration","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically update orchestration when smb resources are changed","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_cloning":{"name":"pause_cloning","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_purging":{"name":"pause_purging","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous subvolume purge threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.104:8443/","prometheus":"http://192.168.123.104:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"tentacle":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":5,"active_clients":[{"name":"devicehealth","addrvec":[{"type":"v2","addr":"192.168.123.104:0","nonce":492024060}]},{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.104:0","nonce":582437302}]},{"name":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.104:0","nonce":633786551}]},{"name":"volumes","addrvec":[{"type":"v2","addr":"192.168.123.104:0","nonce":2841238712}]}]} 2026-04-15T14:13:37.317 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-04-15T14:13:37.317 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-04-15T14:13:37.317 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd dump --format=json 2026-04-15T14:13:37.455 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:37.835 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:37.835 
INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":28,"fsid":"d89dc7c6-38d4-11f1-aa58-cd98464f39ae","created":"2026-04-15T14:11:13.007357+0000","modified":"2026-04-15T14:13:35.549994+0000","last_up_change":"2026-04-15T14:13:32.369217+0000","last_in_change":"2026-04-15T14:13:13.632102+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":10,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-15T14:13:26.340803+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"22","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"a7d56dd6-5ea4-4ff2-ae38-e4e5ee1168fc","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":26,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6800","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6801","nonce":1390887308}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6802","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6803","nonce":1390887308}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6806","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6807","nonce":1390887308}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6804","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6805","nonce":1390887308}]},"public_addr":"192.168.123.105:6801/1390887308","cluster_addr":"192.168.123.105:6803/1390887308","heartbeat_back_addr":"192.168.123.105:6807/1390887308","heartbeat_front_addr":"192.168.123.105:6805/1390887308","state":["exists","up"]},{"osd":1,"uuid":"fc93bc36-2a43-45af-8a80-9f98b42d7ef3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6803","nonce":2939922560}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6805","nonce":2939922560}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6809","nonce":2939922560}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6807","nonce":2939922560}]},"public_addr":"192.168.123.104:6803/2939922560","cluster_addr":"192.168.123.104:6805/2939922560","heartbeat_back_addr":"192.168.123.104:6809/2939922560","heartbeat_front_addr":"192.168.123.104:6807/2939922560","state":["exists","up"]},{"osd":2,"uuid":"59fc7e9a-9099-4253-90bf-a48986abab86","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":20,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6808","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6809","nonce":2685562761}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6810","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6811","nonce":2685562761}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6814","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6815","nonce":2685562761}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6812","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6813","nonce":2685562761}]},"public_addr":"192.168.123.105:6809/2685562761","cluster_addr":"192.168.123.105:6811/2685562761","heartbeat_back_addr":"192.168.123.105:6815/2685562761","heartbeat_front_addr":"192.168.123.105:6813/2685562761","state":["exists","up"]},{"osd":3,"uuid":"156ad8a0-971a-4d3b-b8e3-a29bb406fc10","up":1,"in":1,"weight":1,"primary_affinit
y":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6811","nonce":552697038}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6813","nonce":552697038}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6816","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6817","nonce":552697038}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6815","nonce":552697038}]},"public_addr":"192.168.123.104:6811/552697038","cluster_addr":"192.168.123.104:6813/552697038","heartbeat_back_addr":"192.168.123.104:6817/552697038","heartbeat_front_addr":"192.168.123.104:6815/552697038","state":["exists","up"]},{"osd":4,"uuid":"ee4fe9d8-9ee7-43ef-b4c1-1520b7ddce0a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6816","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6817","nonce":4184869825}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6818","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6819","nonce":4184869825}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6822","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6823","nonce":4184869825}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6820","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6821","nonce":4184869825}]},"public_addr":"192.168.123.105:6817/4184869825","cluster_addr":"192.168.123.105:6819/4184869825","heartbeat_back_addr":"192.168.123.105:6823/4184869825","heartbeat_front_addr":"192.168.123.105:6821/4184869825","state":["exists","up"]},{"osd":5,"uuid":"93129f51-c10e-496f-a439-6e42f717437c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":23,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6818","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6819","nonce":3514888349}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6820","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6821","nonce":3514888349}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6824","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6825","nonce":3514888349}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6822","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6823","nonce":3514888349}]},"public_addr":"192.168.123.104:6819/3514888349","cluster_addr":"192.168.123.104:6821/3514888349","heartbeat_back_addr":"192.168.123.104:6825/3514888349","heartbeat_front_addr":"192.168.123.104:6823/3514888349","state":["exists","up"]},{"osd":6,"uuid":"801ea36c-499d-4470-8775-037ff667b7b3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6826","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:6827","nonce":3842176150}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6828","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:
6829","nonce":3842176150}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6832","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:6833","nonce":3842176150}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6830","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:6831","nonce":3842176150}]},"public_addr":"192.168.123.104:6827/3842176150","cluster_addr":"192.168.123.104:6829/3842176150","heartbeat_back_addr":"192.168.123.104:6833/3842176150","heartbeat_front_addr":"192.168.123.104:6831/3842176150","state":["exists","up"]},{"osd":7,"uuid":"b8b34729-ecc2-43d4-a24f-afe5c06feaac","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":27,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6824","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6825","nonce":266427133}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6826","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6827","nonce":266427133}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6830","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6831","nonce":266427133}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6828","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6829","nonce":266427133}]},"public_addr":"192.168.123.105:6825/266427133","cluster_addr":"192.168.123.105:6827/266427133","heartbeat_back_addr":"192.168.123.105:6831/266427133","heartbeat_front_addr":"192.168.123.105:6829/266427133","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:20.136975+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:20.412493+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:23.293505+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:23.463722+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:26.459395+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:26.747709+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:29.844429+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:29.386179+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.104:6801/322276771":"2026-04-16T14:12:44.048990+0000","192.168.123.104:0/1546778517":"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/1070793896":"2026-04-16T14:11:39.957718+0000","192.168.123.104:6801/8060406":
"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/3915671577":"2026-04-16T14:12:02.850399+0000","192.168.123.104:6800/8060406":"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/1128291451":"2026-04-16T14:12:44.048990+0000","192.168.123.104:6800/322276771":"2026-04-16T14:12:44.048990+0000","192.168.123.104:0/692560052":"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/615164344":"2026-04-16T14:12:02.850399+0000","192.168.123.104:6800/3792063836":"2026-04-16T14:12:02.850399+0000","192.168.123.104:6801/3792063836":"2026-04-16T14:12:02.850399+0000","192.168.123.104:0/3225569276":"2026-04-16T14:12:02.850399+0000","192.168.123.104:0/1245479587":"2026-04-16T14:12:44.048990+0000","192.168.123.104:0/1116364314":"2026-04-16T14:12:44.048990+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-15T14:13:37.835 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:37 vm04 ceph-mon[53345]: pgmap v42: 1 pgs: 1 active+undersized+remapped; 577 KiB data, 614 MiB used, 159 GiB / 160 GiB avail 2026-04-15T14:13:37.835 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:37 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/4292047495' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-15T14:13:37.835 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:37 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3029550747' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-15T14:13:37.892 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-04-15T14:13:37.892 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd dump --format=json 2026-04-15T14:13:37.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:37 vm05 ceph-mon[57841]: pgmap v42: 1 pgs: 1 active+undersized+remapped; 577 KiB data, 614 MiB used, 159 GiB / 160 GiB avail 2026-04-15T14:13:37.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:37 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/4292047495' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-15T14:13:37.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:37 vm05 ceph-mon[57841]: from='client.? 
192.168.123.104:0/3029550747' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-15T14:13:38.038 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:38.407 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:38.407 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":28,"fsid":"d89dc7c6-38d4-11f1-aa58-cd98464f39ae","created":"2026-04-15T14:11:13.007357+0000","modified":"2026-04-15T14:13:35.549994+0000","last_up_change":"2026-04-15T14:13:32.369217+0000","last_in_change":"2026-04-15T14:13:13.632102+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":10,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-15T14:13:26.340803+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"22","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"a7d56dd6-5ea4-4ff2-ae38-e4e5ee1168fc","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":26,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6800","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6801","nonce":1390887308}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6802","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6803","nonce":1390887308}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6806","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6807","nonce":1390887308}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6804","nonce":1390887308},{"type":"v1","addr":"192.168.123.105:6805","nonce":1390887308}]},"public_addr":"192.168.123.105:6801/1390887308","cluster_addr":"192.168.123.105:6803/1390887308","heartbeat_back_addr":"192.168.123.105:6807/1390887308","heartbeat_front_addr":"192.168.123.105:6805/1390887308","state":["exists","up"]},{"osd":1,"uuid":"fc93bc36-2a43-45af-8a80-9f98b42d7ef3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6803","nonce":2939922560}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6805","nonce":2939922560}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6809","nonce":2939922560}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":2939922560},{"type":"v1","addr":"192.168.123.104:6807","nonce":2939922560}]},"public_addr":"192.168.123.104:6803/2939922560","cluster_addr":"192.168.123.104:6805/2939922560","heartbeat_back_addr":"192.168.123.104:6809/2939922560","heartbeat_front_addr":"192.168.123.104:6807/2939922560","state":["exists","up"]},{"osd":2,"uuid":"59fc7e9a-9099-4253-90bf-a48986abab86","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":20,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6808","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6809","nonce":2685562761}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6810","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6811","nonce":2685562761}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6814","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6815","nonce":2685562761}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6812","nonce":2685562761},{"type":"v1","addr":"192.168.123.105:6813","nonce":2685562761}]},"public_addr":"192.168.123.105:6809/2685562761","cluster_addr":"192.168.123.105:6811/2685562761","heartbeat_back_addr":"192.168.123.105:6815/2685562761","heartbeat_front_addr":"192.168.123.105:6813/2685562761","state":["exists","up"]},{"osd":3,"uuid":"156ad8a0-971a-4d3b-b8e3-a29bb406fc10","up":1,"in":1,"weight":1,"primary_affinit
y":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6811","nonce":552697038}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6813","nonce":552697038}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6816","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6817","nonce":552697038}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":552697038},{"type":"v1","addr":"192.168.123.104:6815","nonce":552697038}]},"public_addr":"192.168.123.104:6811/552697038","cluster_addr":"192.168.123.104:6813/552697038","heartbeat_back_addr":"192.168.123.104:6817/552697038","heartbeat_front_addr":"192.168.123.104:6815/552697038","state":["exists","up"]},{"osd":4,"uuid":"ee4fe9d8-9ee7-43ef-b4c1-1520b7ddce0a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6816","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6817","nonce":4184869825}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6818","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6819","nonce":4184869825}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6822","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6823","nonce":4184869825}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6820","nonce":4184869825},{"type":"v1","addr":"192.168.123.105:6821","nonce":4184869825}]},"public_addr":"192.168.123.105:6817/4184869825","cluster_addr":"192.168.123.105:6819/4184869825","heartbeat_back_addr":"192.168.123.105:6823/4184869825","heartbeat_front_addr":"192.168.123.105:6821/4184869825","state":["exists","up"]},{"osd":5,"uuid":"93129f51-c10e-496f-a439-6e42f717437c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":23,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6818","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6819","nonce":3514888349}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6820","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6821","nonce":3514888349}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6824","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6825","nonce":3514888349}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6822","nonce":3514888349},{"type":"v1","addr":"192.168.123.104:6823","nonce":3514888349}]},"public_addr":"192.168.123.104:6819/3514888349","cluster_addr":"192.168.123.104:6821/3514888349","heartbeat_back_addr":"192.168.123.104:6825/3514888349","heartbeat_front_addr":"192.168.123.104:6823/3514888349","state":["exists","up"]},{"osd":6,"uuid":"801ea36c-499d-4470-8775-037ff667b7b3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6826","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:6827","nonce":3842176150}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6828","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:
6829","nonce":3842176150}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6832","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:6833","nonce":3842176150}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6830","nonce":3842176150},{"type":"v1","addr":"192.168.123.104:6831","nonce":3842176150}]},"public_addr":"192.168.123.104:6827/3842176150","cluster_addr":"192.168.123.104:6829/3842176150","heartbeat_back_addr":"192.168.123.104:6833/3842176150","heartbeat_front_addr":"192.168.123.104:6831/3842176150","state":["exists","up"]},{"osd":7,"uuid":"b8b34729-ecc2-43d4-a24f-afe5c06feaac","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":27,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6824","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6825","nonce":266427133}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6826","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6827","nonce":266427133}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6830","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6831","nonce":266427133}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6828","nonce":266427133},{"type":"v1","addr":"192.168.123.105:6829","nonce":266427133}]},"public_addr":"192.168.123.105:6825/266427133","cluster_addr":"192.168.123.105:6827/266427133","heartbeat_back_addr":"192.168.123.105:6831/266427133","heartbeat_front_addr":"192.168.123.105:6829/266427133","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:20.136975+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:20.412493+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:23.293505+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:23.463722+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:26.459395+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:26.747709+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:29.844429+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T14:13:29.386179+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.104:6801/322276771":"2026-04-16T14:12:44.048990+0000","192.168.123.104:0/1546778517":"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/1070793896":"2026-04-16T14:11:39.957718+0000","192.168.123.104:6801/8060406":
"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/3915671577":"2026-04-16T14:12:02.850399+0000","192.168.123.104:6800/8060406":"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/1128291451":"2026-04-16T14:12:44.048990+0000","192.168.123.104:6800/322276771":"2026-04-16T14:12:44.048990+0000","192.168.123.104:0/692560052":"2026-04-16T14:11:39.957718+0000","192.168.123.104:0/615164344":"2026-04-16T14:12:02.850399+0000","192.168.123.104:6800/3792063836":"2026-04-16T14:12:02.850399+0000","192.168.123.104:6801/3792063836":"2026-04-16T14:12:02.850399+0000","192.168.123.104:0/3225569276":"2026-04-16T14:12:02.850399+0000","192.168.123.104:0/1245479587":"2026-04-16T14:12:44.048990+0000","192.168.123.104:0/1116364314":"2026-04-16T14:12:44.048990+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-15T14:13:38.463 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph tell osd.0 flush_pg_stats 2026-04-15T14:13:38.463 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph tell osd.1 flush_pg_stats 2026-04-15T14:13:38.463 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph tell osd.2 flush_pg_stats 2026-04-15T14:13:38.463 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph tell osd.3 flush_pg_stats 2026-04-15T14:13:38.464 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph tell osd.4 flush_pg_stats 2026-04-15T14:13:38.464 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph tell osd.5 flush_pg_stats 2026-04-15T14:13:38.464 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph tell osd.6 flush_pg_stats 2026-04-15T14:13:38.464 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph tell osd.7 flush_pg_stats 2026-04-15T14:13:38.737 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:38.753 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config 
/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:38.757 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:38.758 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:38.796 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:38 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3238779175' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T14:13:38.796 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:38 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/361383199' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T14:13:38.800 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:38.809 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:38.813 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:38.813 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:38.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:38 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3238779175' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T14:13:38.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:38 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/361383199' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T14:13:39.423 INFO:teuthology.orchestra.run.vm04.stdout:77309411332 2026-04-15T14:13:39.423 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd last-stat-seq osd.2 2026-04-15T14:13:39.438 INFO:teuthology.orchestra.run.vm04.stdout:94489280515 2026-04-15T14:13:39.438 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd last-stat-seq osd.5 2026-04-15T14:13:39.438 INFO:teuthology.orchestra.run.vm04.stdout:68719476741 2026-04-15T14:13:39.438 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd last-stat-seq osd.1 2026-04-15T14:13:39.477 INFO:teuthology.orchestra.run.vm04.stdout:107374182404 2026-04-15T14:13:39.478 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd last-stat-seq osd.7 2026-04-15T14:13:39.552 INFO:teuthology.orchestra.run.vm04.stdout:77309411332 2026-04-15T14:13:39.552 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 
d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd last-stat-seq osd.3 2026-04-15T14:13:39.566 INFO:teuthology.orchestra.run.vm04.stdout:94489280515 2026-04-15T14:13:39.566 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd last-stat-seq osd.4 2026-04-15T14:13:39.619 INFO:teuthology.orchestra.run.vm04.stdout:68719476741 2026-04-15T14:13:39.619 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd last-stat-seq osd.0 2026-04-15T14:13:39.656 INFO:teuthology.orchestra.run.vm04.stdout:107374182403 2026-04-15T14:13:39.656 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd last-stat-seq osd.6 2026-04-15T14:13:39.686 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:39.740 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:39.748 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:39.813 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:39.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:39 vm04 ceph-mon[53345]: pgmap v43: 1 pgs: 1 active+undersized+remapped; 577 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-04-15T14:13:39.904 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:39.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:39 vm05 ceph-mon[57841]: pgmap v43: 1 pgs: 1 active+undersized+remapped; 577 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-04-15T14:13:40.026 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:40.074 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:40.116 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:40.570 INFO:teuthology.orchestra.run.vm04.stdout:77309411331 2026-04-15T14:13:40.601 INFO:teuthology.orchestra.run.vm04.stdout:68719476741 2026-04-15T14:13:40.677 INFO:teuthology.orchestra.run.vm04.stdout:107374182404 2026-04-15T14:13:40.687 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476741 got 68719476741 for osd.1 2026-04-15T14:13:40.687 DEBUG:teuthology.parallel:result is None 2026-04-15T14:13:40.727 INFO:teuthology.orchestra.run.vm04.stdout:94489280515 2026-04-15T14:13:40.759 INFO:tasks.cephadm.ceph_manager.ceph:need seq 107374182404 got 107374182404 for osd.7 2026-04-15T14:13:40.759 DEBUG:teuthology.parallel:result is None 2026-04-15T14:13:40.818 INFO:teuthology.orchestra.run.vm04.stdout:77309411332 2026-04-15T14:13:40.819 INFO:tasks.cephadm.ceph_manager.ceph:need seq 77309411332 got 77309411331 for 
osd.2 2026-04-15T14:13:40.829 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:40 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/4034666165' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-15T14:13:40.829 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280515 got 94489280515 for osd.5 2026-04-15T14:13:40.829 DEBUG:teuthology.parallel:result is None 2026-04-15T14:13:40.832 INFO:teuthology.orchestra.run.vm04.stdout:107374182403 2026-04-15T14:13:40.861 INFO:teuthology.orchestra.run.vm04.stdout:68719476741 2026-04-15T14:13:40.896 INFO:tasks.cephadm.ceph_manager.ceph:need seq 77309411332 got 77309411332 for osd.3 2026-04-15T14:13:40.896 DEBUG:teuthology.parallel:result is None 2026-04-15T14:13:40.909 INFO:tasks.cephadm.ceph_manager.ceph:need seq 107374182403 got 107374182403 for osd.6 2026-04-15T14:13:40.910 DEBUG:teuthology.parallel:result is None 2026-04-15T14:13:40.911 INFO:teuthology.orchestra.run.vm04.stdout:94489280515 2026-04-15T14:13:40.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:40 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/4034666165' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-15T14:13:40.980 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280515 got 94489280515 for osd.4 2026-04-15T14:13:40.980 DEBUG:teuthology.parallel:result is None 2026-04-15T14:13:40.989 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476741 got 68719476741 for osd.0 2026-04-15T14:13:40.989 DEBUG:teuthology.parallel:result is None 2026-04-15T14:13:41.820 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph osd last-stat-seq osd.2 2026-04-15T14:13:41.845 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:41 vm04 ceph-mon[53345]: pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:41.845 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:41 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3425858041' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-15T14:13:41.845 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:41 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1994781235' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-15T14:13:41.845 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:41 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/729793328' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-15T14:13:41.845 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:41 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/1692632943' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-15T14:13:41.845 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:41 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/3112240998' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-15T14:13:41.845 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:41 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/2604651887' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch 2026-04-15T14:13:41.845 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:41 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/1561961826' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-15T14:13:41.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:41 vm05 ceph-mon[57841]: pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:41.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:41 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3425858041' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-15T14:13:41.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:41 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/1994781235' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-15T14:13:41.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:41 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/729793328' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-15T14:13:41.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:41 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/1692632943' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-15T14:13:41.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:41 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/3112240998' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-15T14:13:41.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:41 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/2604651887' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch 2026-04-15T14:13:41.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:41 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/1561961826' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-15T14:13:41.962 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:42.345 INFO:teuthology.orchestra.run.vm04.stdout:77309411332 2026-04-15T14:13:42.422 INFO:tasks.cephadm.ceph_manager.ceph:need seq 77309411332 got 77309411332 for osd.2 2026-04-15T14:13:42.422 DEBUG:teuthology.parallel:result is None 2026-04-15T14:13:42.422 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-04-15T14:13:42.422 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph pg dump --format=json 2026-04-15T14:13:42.599 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:42.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:42 vm04 ceph-mon[53345]: from='client.? 192.168.123.104:0/805995912' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-15T14:13:42.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:42 vm05 ceph-mon[57841]: from='client.? 
192.168.123.104:0/805995912' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-15T14:13:42.974 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:42.975 INFO:teuthology.orchestra.run.vm04.stderr:dumped all 2026-04-15T14:13:43.054 INFO:teuthology.orchestra.run.vm04.stdout:{"pg_ready":true,"pg_map":{"version":45,"stamp":"2026-04-15T14:13:42.064233+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":86,"num_read_kb":73,"num_write":145,"num_write_kb":2672,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":112,"ondisk_log_size":112,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167706624,"kb_used":218652,"kb_used_data":3884,"kb_used_omap":60,"kb_used_meta":214467,"kb_avail":167487972,"statfs":{"total":171731582976,"available":171507683328,"internally_reserved":0,"allocated":3977216,"data_stored":2602496,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":62344,"internal_metadata":219614328},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"u
p":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.001224"},"pg_stats":[{"pgid":"1.0","version":"21'112","reported_seq":158,"reported_epoch":28,"state":"active+clean","last_fresh":"2026-04-15T14:13:35.565891+0000","last_change":"2026-04-15T14:13:35.565285+0000","last_active":"2026-04-15T14:13:35.565891+0000","last_peered":"2026-04-15T14:13:35.565891+0000","last_clean":"2026-04-15T14:13:35.565891+0000","last_became_active":"2026-04-15T14:13:35.565134+0000","last_became_peered":"2026-04-15T14:13:35.565134+0000","last_unstale":"2026-04-15T14:13:35.565891+0000","last_undegraded":"2026-04-15T14:13:35.565891+0000","last_fullsized":"2026-04-15T14:13:35.565891+0000","mapping_epoch":27,"log_start":"0'0","ondisk_log_start":"0'0","created":20,"last_epoch_clean":28,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-04-15T14:13:27.179316+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-04-15T14:13:27.179316+0000","last_clean_scrub_stamp":"2026-04-15T14:13:27.179316+0000","objects_scrubbed":0,"log_size":112,"log_dups_size":0,"ondisk_log_size":112,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-04-16T16:30:42.892705+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":86,"num_read_kb":73,"num_write":145,"num_write_kb":2672,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,5,2],"acting":[7,5,2],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":86,"num_read_kb":73,"num_write":145,"num_write_kb":2672,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":2375680,"data_
stored":2361472,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":112,"ondisk_log_size":112,"up":3,"acting":3,"num_store_stats":4}],"osd_stats":[{"osd":7,"up_from":25,"seq":107374182404,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27688,"kb_used_data":848,"kb_used_omap":6,"kb_used_meta":26809,"kb_avail":20935640,"statfs":{"total":21466447872,"available":21438095360,"internally_reserved":0,"allocated":868352,"data_stored":694292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6159,"internal_metadata":27453425},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":25,"seq":107374182403,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27116,"kb_used_data":268,"kb_used_omap":6,"kb_used_meta":26809,"kb_avail":20936212,"statfs":{"total":21466447872,"available":21438681088,"internally_reserved":0,"allocated":274432,"data_stored":103924,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6163,"internal_metadata":27453421},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":22,"seq":94489280516,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27688,"kb_used_data":848,"kb_used_omap":7,"kb_used_meta":26808,"kb_avail":20935640,"statfs":{"total":21466447872,"available":21438095360,"internally_reserved":0,"allocated":868352,"data_stored":694292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8121,"internal_metadata":27451463},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":22,"seq":94489280516,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27116,"kb_used_data":268,"kb_used_omap":7,"kb_used_meta":26808,"kb_avail":20936212,"statfs":{"total":21466447872,"available":21438681088,"internally_reserved":0,"allocated":274432,"data_stored":103924,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8120,"internal_metadata":27451464},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":18,"seq":77309411333,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27116,"kb_used_data":268,"kb_used_omap":6,"kb_used_meta":26809,"kb_avail":20936212,"statfs":{"total":21466447872,"available":21438681088,"internally_reserved":0,"allocated":274432,"data_stored":103924,"data_compressed":0,"data_compressed_allocated":0,"data
_compressed_original":0,"omap_allocated":6168,"internal_metadata":27453416},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":18,"seq":77309411333,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27688,"kb_used_data":848,"kb_used_omap":9,"kb_used_meta":26806,"kb_avail":20935640,"statfs":{"total":21466447872,"available":21438095360,"internally_reserved":0,"allocated":868352,"data_stored":694292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":10071,"internal_metadata":27449513},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27116,"kb_used_data":268,"kb_used_omap":7,"kb_used_meta":26808,"kb_avail":20936212,"statfs":{"total":21466447872,"available":21438681088,"internally_reserved":0,"allocated":274432,"data_stored":103924,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":7471,"internal_metadata":27452113},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27124,"kb_used_data":268,"kb_used_omap":9,"kb_used_meta":26806,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":274432,"data_stored":103924,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":10071,"internal_metadata":27449513},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-04-15T14:13:43.055 DEBUG:teuthology.orchestra.run.vm04:> sudo 
/home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph pg dump --format=json 2026-04-15T14:13:43.200 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:43.572 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:43.572 INFO:teuthology.orchestra.run.vm04.stderr:dumped all 2026-04-15T14:13:43.629 INFO:teuthology.orchestra.run.vm04.stdout:{"pg_ready":true,"pg_map":{"version":45,"stamp":"2026-04-15T14:13:42.064233+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":86,"num_read_kb":73,"num_write":145,"num_write_kb":2672,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":112,"ondisk_log_size":112,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167706624,"kb_used":218652,"kb_used_data":3884,"kb_used_omap":60,"kb_used_meta":214467,"kb_avail":167487972,"statfs":{"total":171731582976,"available":171507683328,"internally_reserved":0,"allocated":3977216,"data_stored":2602496,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":62344,"internal_metadata":219614328},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_s
tats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.001224"},"pg_stats":[{"pgid":"1.0","version":"21'112","reported_seq":158,"reported_epoch":28,"state":"active+clean","last_fresh":"2026-04-15T14:13:35.565891+0000","last_change":"2026-04-15T14:13:35.565285+0000","last_active":"2026-04-15T14:13:35.565891+0000","last_peered":"2026-04-15T14:13:35.565891+0000","last_clean":"2026-04-15T14:13:35.565891+0000","last_became_active":"2026-04-15T14:13:35.565134+0000","last_became_peered":"2026-04-15T14:13:35.565134+0000","last_unstale":"2026-04-15T14:13:35.565891+0000","last_undegraded":"2026-04-15T14:13:35.565891+0000","last_fullsized":"2026-04-15T14:13:35.565891+0000","mapping_epoch":27,"log_start":"0'0","ondisk_log_start":"0'0","created":20,"last_epoch_clean":28,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-04-15T14:13:27.179316+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-04-15T14:13:27.179316+0000","last_clean_scrub_stamp":"2026-04-15T14:13:27.179316+0000","objects_scrubbed":0,"log_size":112,"log_dups_size":0,"ondisk_log_size":112,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-04-16T16:30:42.892705+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":86,"num_read_kb":73,"num_write":145,"num_write_kb":2672,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,5,2],"acting":[7,5,2],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":86,"num_read_kb":73,"num_write":145,"num_write_kb":2672,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objec
ts_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":2375680,"data_stored":2361472,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":112,"ondisk_log_size":112,"up":3,"acting":3,"num_store_stats":4}],"osd_stats":[{"osd":7,"up_from":25,"seq":107374182404,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27688,"kb_used_data":848,"kb_used_omap":6,"kb_used_meta":26809,"kb_avail":20935640,"statfs":{"total":21466447872,"available":21438095360,"internally_reserved":0,"allocated":868352,"data_stored":694292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6159,"internal_metadata":27453425},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":25,"seq":107374182403,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27116,"kb_used_data":268,"kb_used_omap":6,"kb_used_meta":26809,"kb_avail":20936212,"statfs":{"total":21466447872,"available":21438681088,"internally_reserved":0,"allocated":274432,"data_stored":103924,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6163,"internal_metadata":27453421},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":22,"seq":94489280516,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27688,"kb_used_data":848,"kb_used_omap":7,"kb_used_meta":26808,"kb_avail":20935640,"statfs":{"total":21466447872,"available":21438095360,"internally_reserved":0,"allocated":868352,"data_stored":694292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8121,"internal_metadata":27451463},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":22,"seq":94489280516,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27116,"kb_used_data":268,"kb_used_omap":7,"kb_used_meta":26808,"kb_avail":20936212,"statfs":{"total":21466447872,"available":21438681088,"internally_reserved":0,"allocated":274432,"data_stored":103924,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8120,"internal_metadata":27451464},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":18,"seq":77309411333,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27116,"kb_used_
data":268,"kb_used_omap":6,"kb_used_meta":26809,"kb_avail":20936212,"statfs":{"total":21466447872,"available":21438681088,"internally_reserved":0,"allocated":274432,"data_stored":103924,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6168,"internal_metadata":27453416},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":18,"seq":77309411333,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27688,"kb_used_data":848,"kb_used_omap":9,"kb_used_meta":26806,"kb_avail":20935640,"statfs":{"total":21466447872,"available":21438095360,"internally_reserved":0,"allocated":868352,"data_stored":694292,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":10071,"internal_metadata":27449513},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27116,"kb_used_data":268,"kb_used_omap":7,"kb_used_meta":26808,"kb_avail":20936212,"statfs":{"total":21466447872,"available":21438681088,"internally_reserved":0,"allocated":274432,"data_stored":103924,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":7471,"internal_metadata":27452113},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27124,"kb_used_data":268,"kb_used_omap":9,"kb_used_meta":26806,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":274432,"data_stored":103924,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":10071,"internal_metadata":27449513},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"al
located":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-04-15T14:13:43.630 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-04-15T14:13:43.630 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-04-15T14:13:43.630 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-04-15T14:13:43.630 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph health --format=json 2026-04-15T14:13:43.768 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:43.824 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:43 vm04 ceph-mon[53345]: pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:43.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:43 vm05 ceph-mon[57841]: pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:44.187 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:44.187 INFO:teuthology.orchestra.run.vm04.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-04-15T14:13:44.259 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-04-15T14:13:44.260 INFO:tasks.cephadm:Setup complete, yielding 2026-04-15T14:13:44.260 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-04-15T14:13:44.262 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm04.local 2026-04-15T14:13:44.262 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch status' 2026-04-15T14:13:44.402 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:44.808 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:44 vm04 ceph-mon[53345]: from='client.14538 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:44.808 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:44 vm04 ceph-mon[53345]: from='client.14542 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:44.808 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:44 vm04 ceph-mon[53345]: from='client.? 
192.168.123.104:0/2900624605' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-15T14:13:44.808 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:44 vm04 ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:13:44.809 INFO:teuthology.orchestra.run.vm04.stdout:Backend: cephadm 2026-04-15T14:13:44.809 INFO:teuthology.orchestra.run.vm04.stdout:Available: Yes 2026-04-15T14:13:44.809 INFO:teuthology.orchestra.run.vm04.stdout:Paused: No 2026-04-15T14:13:44.866 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch ps' 2026-04-15T14:13:44.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:44 vm05 ceph-mon[57841]: from='client.14538 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:44.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:44 vm05 ceph-mon[57841]: from='client.14542 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:44.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:44 vm05 ceph-mon[57841]: from='client.? 192.168.123.104:0/2900624605' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-15T14:13:44.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:44 vm05 ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:13:45.003 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:45.395 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:13:45.395 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.vm04 vm04 *:9093,9094 running (42s) 13s ago 88s 19.2M - 0.28.1 91c01b3cec9b 046e19e1d7ef 2026-04-15T14:13:45.395 INFO:teuthology.orchestra.run.vm04.stdout:ceph-exporter.vm04 vm04 *:9926 running (95s) 13s ago 94s 8744k - 20.2.0-19-g7ec4401a095 259950fb12cb ac42288b7c30 2026-04-15T14:13:45.395 INFO:teuthology.orchestra.run.vm04.stdout:ceph-exporter.vm05 vm05 *:9926 running (56s) 14s ago 56s 9135k - 20.2.0-19-g7ec4401a095 259950fb12cb 973a0e5e1218 2026-04-15T14:13:45.395 INFO:teuthology.orchestra.run.vm04.stdout:crash.vm04 vm04 running (94s) 13s ago 94s 11.1M - 20.2.0-19-g7ec4401a095 259950fb12cb f1ba08f4a680 2026-04-15T14:13:45.395 INFO:teuthology.orchestra.run.vm04.stdout:crash.vm05 vm05 running (55s) 14s ago 55s 11.1M - 20.2.0-19-g7ec4401a095 259950fb12cb cd842580320f 2026-04-15T14:13:45.395 INFO:teuthology.orchestra.run.vm04.stdout:grafana.vm04 vm04 *:3000 running (41s) 13s ago 79s 134M - 12.2.0 1849e2140421 f4986a9d59d0 2026-04-15T14:13:45.395 INFO:teuthology.orchestra.run.vm04.stdout:mgr.vm04.ycniad vm04 *:9283,8765,8443 running (2m) 13s ago 2m 547M - 20.2.0-19-g7ec4401a095 259950fb12cb b8faa7587f88 2026-04-15T14:13:45.395 INFO:teuthology.orchestra.run.vm04.stdout:mgr.vm05.ozgwuj vm05 *:8443,9283,8765 running (51s) 14s ago 51s 476M - 20.2.0-19-g7ec4401a095 259950fb12cb b7dc555fed45 2026-04-15T14:13:45.395 
INFO:teuthology.orchestra.run.vm04.stdout:mon.vm04 vm04 running (2m) 13s ago 2m 51.7M 2048M 20.2.0-19-g7ec4401a095 259950fb12cb 6257b904a435 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:mon.vm05 vm05 running (50s) 14s ago 50s 45.2M 2048M 20.2.0-19-g7ec4401a095 259950fb12cb c33af830112a 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.vm04 vm04 *:9100 running (91s) 13s ago 91s 15.1M - 1.9.1 255ec253085f 0afcbfea792f 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.vm05 vm05 *:9100 running (52s) 14s ago 52s 11.1M - 1.9.1 255ec253085f bef5815ebaec 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm05 running (26s) 14s ago 26s 67.0M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb d2151bc5647c 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (26s) 13s ago 26s 55.7M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb fda113eaca8f 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm05 running (23s) 14s ago 23s 67.5M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb 20816d9bae45 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (23s) 13s ago 23s 30.1M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb 7714f29306f4 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm05 running (20s) 14s ago 20s 54.0M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb b01e22f64201 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm04 running (19s) 13s ago 19s 53.7M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb c10f625df3e7 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm04 running (16s) 13s ago 16s 34.9M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb 7458cb7b5ae0 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm05 running (17s) 14s ago 17s 35.0M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb aa065f2eb46a 2026-04-15T14:13:45.396 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.vm04 vm04 *:9095 running (40s) 13s ago 72s 32.0M - 3.6.0 4fcecf061b74 0682832fc841 2026-04-15T14:13:45.446 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch ls' 2026-04-15T14:13:45.607 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:45.657 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:45 vm04 ceph-mon[53345]: pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:45.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:45 vm05 ceph-mon[57841]: pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:46.004 INFO:teuthology.orchestra.run.vm04.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-04-15T14:13:46.004 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager ?:9093,9094 1/1 14s ago 115s count:1 2026-04-15T14:13:46.004 INFO:teuthology.orchestra.run.vm04.stdout:ceph-exporter ?:9926 2/2 14s ago 117s * 2026-04-15T14:13:46.004 INFO:teuthology.orchestra.run.vm04.stdout:crash 2/2 14s ago 117s * 2026-04-15T14:13:46.004 INFO:teuthology.orchestra.run.vm04.stdout:grafana ?:3000 1/1 14s ago 116s count:1 2026-04-15T14:13:46.004 
INFO:teuthology.orchestra.run.vm04.stdout:mgr 2/2 14s ago 117s count:2
2026-04-15T14:13:46.004 INFO:teuthology.orchestra.run.vm04.stdout:mon 2/2 14s ago 91s vm04:192.168.123.104=vm04;vm05:192.168.123.105=vm05;count:2
2026-04-15T14:13:46.004 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter ?:9100 2/2 14s ago 115s *
2026-04-15T14:13:46.004 INFO:teuthology.orchestra.run.vm04.stdout:osd.all-available-devices 8 14s ago 41s *
2026-04-15T14:13:46.004 INFO:teuthology.orchestra.run.vm04.stdout:prometheus ?:9095 1/1 14s ago 116s count:1
2026-04-15T14:13:46.074 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch host ls'
2026-04-15T14:13:46.230 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:13:46.636 INFO:teuthology.orchestra.run.vm04.stdout:HOST ADDR LABELS STATUS
2026-04-15T14:13:46.636 INFO:teuthology.orchestra.run.vm04.stdout:vm04 192.168.123.104
2026-04-15T14:13:46.636 INFO:teuthology.orchestra.run.vm04.stdout:vm05 192.168.123.105
2026-04-15T14:13:46.636 INFO:teuthology.orchestra.run.vm04.stdout:2 hosts in cluster
2026-04-15T14:13:46.697 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch device ls'
2026-04-15T14:13:46.839 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:13:46.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:46 vm04 ceph-mon[53345]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:13:46.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:46 vm04 ceph-mon[53345]: from='client.14554 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:13:46.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:46 vm05 ceph-mon[57841]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:13:46.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:46 vm05 ceph-mon[57841]: from='client.14554 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme0n1 hdd Linux_6b7ad07c165929411c9d 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme1n1 hdd Linux_fff789e869d944ed0405 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme2n1 hdd Linux_3369c177c0052290acf5 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme3n1 hdd Linux_6212e725753f54ffe728 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 13s ago Has a FileSystem, Insufficient space (<5GB)
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vdb hdd DWNBRSTVMM04001 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vdc hdd DWNBRSTVMM04002 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vdd hdd DWNBRSTVMM04003 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vde hdd DWNBRSTVMM04004 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme0n1 hdd Linux_aa69ffdfa7408bf7867c 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme1n1 hdd Linux_6884663c6eaa85c58c9d 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme2n1 hdd Linux_01fde6453ce49045a1d5 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme3n1 hdd Linux_0105428e5b971c67554c 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 14s ago Has a FileSystem, Insufficient space (<5GB)
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vdb hdd DWNBRSTVMM05001 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vdc hdd DWNBRSTVMM05002 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vdd hdd DWNBRSTVMM05003 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.244 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vde hdd DWNBRSTVMM05004 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-15T14:13:47.308 INFO:teuthology.run_tasks:Running task vip...
2026-04-15T14:13:47.311 INFO:tasks.vip:Allocating static IPs for each host...
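The vip task entries that follow show the address arithmetic at work: the host's offset from the first usable address of its DHCP subnet (192.168.123.104 in 192.168.123.0/24 gives pos 103) is re-based onto a private test vnet (12.12.0.0/22) to produce a deterministic static address, and the single virtual IP later handed to the ingress spec lands one /24 further into the same vnet. A minimal sketch of that mapping, using only the Python standard library; the function name and the VIP formula are inferred from the logged values, not teuthology's actual API:

    import ipaddress

    def static_and_vip(peer_ip: str, peer_net: str, vnet: str, vip_index: int = 0):
        # Offset of the host from the first usable address of its subnet:
        # 192.168.123.104 in 192.168.123.0/24 -> pos 103 (as logged below).
        net = ipaddress.ip_network(peer_net)
        pos = int(ipaddress.ip_address(peer_ip)) - int(net.network_address) - 1
        test_net = ipaddress.ip_network(vnet)
        # The same offset re-based onto the test vnet -> 12.12.0.104.
        static = test_net.network_address + 1 + pos
        # VIP n appears to sit one /24 (256 addresses) higher per index,
        # consistent with the single VIP 12.12.1.104 logged below.
        vip = test_net.network_address + (vip_index + 1) * 256 + 1 + pos
        return static, vip

    print(static_and_vip("192.168.123.104", "192.168.123.0/24", "12.12.0.0/22"))
    # (IPv4Address('12.12.0.104'), IPv4Address('12.12.1.104'))
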
2026-04-15T14:13:47.311 INFO:tasks.vip:peername 192.168.123.104 2026-04-15T14:13:47.311 INFO:tasks.vip:192.168.123.104 in 192.168.123.0/24, pos 103 2026-04-15T14:13:47.311 INFO:tasks.vip:vm04.local static 12.12.0.104, vnet 12.12.0.0/22 2026-04-15T14:13:47.311 INFO:tasks.vip:VIPs are [IPv4Address('12.12.1.104')] 2026-04-15T14:13:47.311 DEBUG:teuthology.orchestra.run.vm04:> sudo ip route ls 2026-04-15T14:13:47.340 INFO:teuthology.orchestra.run.vm04.stdout:default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.104 metric 100 2026-04-15T14:13:47.340 INFO:teuthology.orchestra.run.vm04.stdout:192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.104 metric 100 2026-04-15T14:13:47.341 INFO:tasks.vip:Configuring 12.12.0.104 on vm04.local iface eth0... 2026-04-15T14:13:47.341 DEBUG:teuthology.orchestra.run.vm04:> sudo ip addr add 12.12.0.104/22 dev eth0 2026-04-15T14:13:47.410 INFO:tasks.vip:peername 192.168.123.105 2026-04-15T14:13:47.410 INFO:tasks.vip:192.168.123.105 in 192.168.123.0/24, pos 104 2026-04-15T14:13:47.411 INFO:tasks.vip:vm05.local static 12.12.0.105, vnet 12.12.0.0/22 2026-04-15T14:13:47.411 DEBUG:teuthology.orchestra.run.vm05:> sudo ip route ls 2026-04-15T14:13:47.440 INFO:teuthology.orchestra.run.vm05.stdout:default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.105 metric 100 2026-04-15T14:13:47.440 INFO:teuthology.orchestra.run.vm05.stdout:192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.105 metric 100 2026-04-15T14:13:47.441 INFO:tasks.vip:Configuring 12.12.0.105 on vm05.local iface eth0... 2026-04-15T14:13:47.441 DEBUG:teuthology.orchestra.run.vm05:> sudo ip addr add 12.12.0.105/22 dev eth0 2026-04-15T14:13:47.508 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-04-15T14:13:47.510 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm04.local 2026-04-15T14:13:47.510 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch device ls --refresh' 2026-04-15T14:13:47.674 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:47.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:47 vm05.local ceph-mon[57841]: from='client.14558 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:47.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:47 vm05.local ceph-mon[57841]: pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:47.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:47 vm05.local ceph-mon[57841]: from='client.14562 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:47.794 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:47 vm04.local ceph-mon[53345]: from='client.14558 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:47.794 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:47 vm04.local ceph-mon[53345]: pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:47.794 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:47 vm04.local ceph-mon[53345]: from='client.14562 -' entity='client.admin' 
cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:48.069 INFO:teuthology.orchestra.run.vm04.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme0n1 hdd Linux_6b7ad07c165929411c9d 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme1n1 hdd Linux_fff789e869d944ed0405 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme2n1 hdd Linux_3369c177c0052290acf5 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme3n1 hdd Linux_6212e725753f54ffe728 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 14s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vdb hdd DWNBRSTVMM04001 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vdc hdd DWNBRSTVMM04002 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vdd hdd DWNBRSTVMM04003 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vde hdd DWNBRSTVMM04004 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme0n1 hdd Linux_aa69ffdfa7408bf7867c 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme1n1 hdd Linux_6884663c6eaa85c58c9d 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme2n1 hdd Linux_01fde6453ce49045a1d5 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme3n1 hdd Linux_0105428e5b971c67554c 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 15s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vdb hdd DWNBRSTVMM05001 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vdc hdd DWNBRSTVMM05002 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vdd hdd DWNBRSTVMM05003 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.070 
INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vde hdd DWNBRSTVMM05004 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:13:48.127 INFO:teuthology.run_tasks:Running task cephadm.apply... 2026-04-15T14:13:48.131 INFO:tasks.cephadm:Applying spec(s): placement: count: 4 host_pattern: '*' service_id: foo service_type: rgw spec: rgw_frontend_port: 8000 --- placement: count: 2 service_id: rgw.foo service_type: ingress spec: backend_service: rgw.foo frontend_port: 9000 monitor_port: 9001 virtual_ip: 12.12.1.104/22 2026-04-15T14:13:48.131 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch apply -i - 2026-04-15T14:13:48.275 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:48.613 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:48 vm04.local ceph-mon[53345]: from='client.14566 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:48.613 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:48 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:13:48.725 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled rgw.foo update... 2026-04-15T14:13:48.725 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled ingress.rgw.foo update... 2026-04-15T14:13:48.811 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-04-15T14:13:48.813 INFO:tasks.cephadm:Waiting for ceph service rgw.foo to start (timeout 300)... 
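The wait_for_service task now polls ceph orch ls -f json every second or two and compares status.running with status.size for the named service; the recurring "rgw.foo has 0/4" lines below are that comparison being logged. A rough stand-alone equivalent of the loop, assuming jq is installed and a single cluster so cephadm can infer the fsid:

    # Poll until rgw.foo reports running == size, giving up after the task's 300 s timeout.
    end=$((SECONDS + 300))
    until sudo /home/ubuntu/cephtest/cephadm shell -- ceph orch ls -f json |
          jq -e '.[] | select(.service_name == "rgw.foo") | .status.running == .status.size' >/dev/null; do
        [ "$SECONDS" -lt "$end" ] || { echo "timed out waiting for rgw.foo"; exit 1; }
        sleep 1
    done

jq -e exits non-zero while the comparison is false (or while the service is not listed yet), which keeps the loop spinning, mirroring the task's behaviour.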
2026-04-15T14:13:48.814 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:13:48.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:48 vm05.local ceph-mon[57841]: from='client.14566 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:48.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:48 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:13:48.968 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:49.415 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:49.415 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:31.933805Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:31.933718Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:31.933747Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:31.933848Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:48.724536Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:13:31.933686Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:31.933633Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": 
"*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:31.933776Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:31.933911Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:31.933880Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:48.720999Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:48.715592Z", "ports": [8000], "running": 0, "size": 4}}] 2026-04-15T14:13:49.482 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:13:49.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: pgmap v48: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:49.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:49.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: from='client.14574 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:49.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: Saving service rgw.foo spec with placement count:4;* 2026-04-15T14:13:49.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:49.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: Saving service ingress.rgw.foo spec with placement count:2 2026-04-15T14:13:49.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:49.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:49.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:49.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:49.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:49.942 
INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:49.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:49 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: pgmap v48: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: from='client.14574 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: Saving service rgw.foo spec with placement count:4;* 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: Saving service ingress.rgw.foo spec with placement count:2 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:49 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:50.484 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:13:50.635 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:51.011 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:50 vm05.local ceph-mon[57841]: from='client.14578 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:51.011 
INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:50 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:51.011 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:50 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:51.040 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:51.040 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:49.673502Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:48.921197Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:48.921259Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:49.673551Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:48.724536Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:13:48.921333Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:48.921366Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:48.921298Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": 
"2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:48.921399Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:49.673587Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:48.720999Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:48.715592Z", "ports": [8000], "running": 0, "size": 4}}] 2026-04-15T14:13:51.041 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:50 vm04.local ceph-mon[53345]: from='client.14578 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:51.041 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:50 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:51.041 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:50 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:51.106 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:13:52.027 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:51 vm05.local ceph-mon[57841]: pgmap v49: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:52.027 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:51 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.027 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:51 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.027 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:51 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.027 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:51 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.027 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:51 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:52.027 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:51 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:13:52.106 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:13:52.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:51 vm04.local ceph-mon[53345]: pgmap v49: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:52.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:51 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' 
entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:51 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:51 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:51 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:51 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:52.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:51 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:13:52.251 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:52.642 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:52.642 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:49.673502Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:48.921197Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:48.921259Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:49.673551Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:48.724536Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:13:48.921333Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", 
"service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:48.921366Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:48.921298Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:48.921399Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:49.673587Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:48.720999Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:48.715592Z", "ports": [8000], "running": 0, "size": 4}}] 2026-04-15T14:13:52.699 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:13:52.917 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='client.14582 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:52.917 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.pzlhsk", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.pzlhsk", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:52.918 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: Deploying daemon rgw.foo.vm05.pzlhsk on vm05 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.owsxoy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.owsxoy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:52.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:52 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='client.14582 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.pzlhsk", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.pzlhsk", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local 
ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: Deploying daemon rgw.foo.vm05.pzlhsk on vm05 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.owsxoy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.owsxoy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:52 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:53.700 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:13:53.879 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:54.001 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: pgmap v50: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: from='client.14586 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: Deploying daemon rgw.foo.vm04.owsxoy on vm04 2026-04-15T14:13:54.002 
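As the four rgw daemons are deployed (pzlhsk and owsxoy above, dhvjjs and lqvrsn below), each first start bootstraps the standard RGW pools (.rgw.root, default.rgw.log, …) and tags them, which is what drives the osdmap epoch bumps from e29 onward. To watch the rollout from the admin host, something like:

    # Check rgw daemon placement, then list the pools the first start created.
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph orch ps --daemon-type rgw
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph osd pool ls detail | grep rgw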
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: osdmap e29: 8 total, 8 up, 8 in 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/4131023700' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: from='client.? ' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.dhvjjs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.dhvjjs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.002 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: pgmap v50: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: from='client.14586 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: Deploying daemon rgw.foo.vm04.owsxoy on vm04 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: osdmap e29: 8 total, 8 up, 8 in 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: from='client.? 192.168.123.105:0/4131023700' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: from='client.? 
' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.dhvjjs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.dhvjjs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.052 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:54.291 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:54.291 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:49.673502Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:48.921197Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:48.921259Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:49.673551Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:48.724536Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", 
"ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:13:48.921333Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:48.921366Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:48.921298Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:48.921399Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:49.673587Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:53.525270Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:48.715592Z", "ports": [8000], "running": 0, "size": 4}}] 2026-04-15T14:13:54.349 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: Deploying daemon rgw.foo.vm05.dhvjjs on vm05 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='client.? 
' entity='client.rgw.foo.vm05.pzlhsk' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: osdmap e30: 8 total, 8 up, 8 in 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: pgmap v53: 33 pgs: 9 creating+peering, 12 active+clean, 12 unknown; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 639 B/s wr, 1 op/s 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='client.14610 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.lqvrsn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.lqvrsn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: Deploying daemon rgw.foo.vm04.lqvrsn on vm04 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: osdmap e31: 8 total, 8 up, 8 in 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch 2026-04-15T14:13:54.925 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='client.? ' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch 2026-04-15T14:13:54.926 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:54 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/3287815887' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: Deploying daemon rgw.foo.vm05.dhvjjs on vm05 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='client.? ' entity='client.rgw.foo.vm05.pzlhsk' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: osdmap e30: 8 total, 8 up, 8 in 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: pgmap v53: 33 pgs: 9 creating+peering, 12 active+clean, 12 unknown; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 639 B/s wr, 1 op/s 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='client.14610 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.lqvrsn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch 2026-04-15T14:13:55.191 
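Several freshly started daemons dispatch the same "osd pool application enable" for default.rgw.log; the ones visible here all finish cleanly, so the overlap is harmless racing rather than an error. The manual form of the call, in case a pool ever has to be tagged outside rgw:

    # Tag a pool with the rgw application so tag-scoped rgw caps apply to it.
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph osd pool application enable default.rgw.log rgw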
INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.lqvrsn", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: Deploying daemon rgw.foo.vm04.lqvrsn on vm04 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: osdmap e31: 8 total, 8 up, 8 in 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='client.? ' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch 2026-04-15T14:13:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:54 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.105:0/3287815887' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch 2026-04-15T14:13:55.350 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:13:55.499 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:55.918 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:55.918 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:49.673502Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:48.921197Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:48.921259Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:49.673551Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:48.724536Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:13:48.921333Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:48.921366Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:48.921298Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] 
\"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:48.921399Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:49.673587Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:55.291070Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:55.285887Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-15T14:13:55.976 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: Saving service rgw.foo spec with placement count:4;* 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: Deploying daemon haproxy.rgw.foo.vm05.ffntij on vm05 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='client.? 
' entity='client.rgw.foo.vm05.dhvjjs' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: osdmap e32: 8 total, 8 up, 8 in 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:56.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:56 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: Saving service rgw.foo spec with placement count:4;* 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: Deploying daemon haproxy.rgw.foo.vm05.ffntij on vm05 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='client.? 
' entity='client.rgw.foo.vm05.dhvjjs' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: osdmap e32: 8 total, 8 up, 8 in 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:56.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:56 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:56.977 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:13:57.121 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:57.279 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:57 vm04.local ceph-mon[53345]: from='client.14628 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:57.279 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:57 vm04.local ceph-mon[53345]: pgmap v56: 65 pgs: 16 creating+peering, 20 active+clean, 29 unknown; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 1.5 KiB/s wr, 5 op/s 2026-04-15T14:13:57.279 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:57 vm04.local ceph-mon[53345]: osdmap e33: 8 total, 8 up, 8 in 2026-04-15T14:13:57.279 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:57 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch 2026-04-15T14:13:57.279 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:57 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch 2026-04-15T14:13:57.279 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:57 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch 2026-04-15T14:13:57.279 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:57 vm04.local ceph-mon[53345]: from='client.? 
' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch 2026-04-15T14:13:57.279 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:57 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/3287815887' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch 2026-04-15T14:13:57.581 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:57.581 INFO:teuthology.orchestra.run.vm04.stdout:[service listing elided: identical to the 14:13:55 "ceph orch ls -f json" output above; rgw.foo and ingress.rgw.foo still report "running": 0 of "size": 4] 2026-04-15T14:13:57.606 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:57 vm05.local ceph-mon[57841]: from='client.14628 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:57.606 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:57 vm05.local ceph-mon[57841]: pgmap v56: 65 pgs: 16 creating+peering, 20 active+clean, 29 unknown; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 1.5 KiB/s wr, 5 op/s 2026-04-15T14:13:57.606 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:57 vm05.local ceph-mon[57841]: osdmap e33: 8 total, 8 up, 8 in 2026-04-15T14:13:57.606 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:57 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch 2026-04-15T14:13:57.606 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:57 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch 2026-04-15T14:13:57.606 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:57 vm05.local ceph-mon[57841]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch 2026-04-15T14:13:57.606 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:57 vm05.local ceph-mon[57841]: from='client.? ' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch 2026-04-15T14:13:57.606 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:57 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.105:0/3287815887' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch 2026-04-15T14:13:57.666 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:13:58.571 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:58 vm05.local ceph-mon[57841]: from='client.14632 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:58.571 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:58 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:58.571 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:58 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:58.571 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:58 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:58.571 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:58 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:58.571 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:58 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-04-15T14:13:58.571 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:58 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-04-15T14:13:58.571 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:58 vm05.local ceph-mon[57841]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-04-15T14:13:58.571 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:58 vm05.local ceph-mon[57841]: from='client.? 
' entity='client.rgw.foo.vm05.dhvjjs' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-04-15T14:13:58.571 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:58 vm05.local ceph-mon[57841]: osdmap e34: 8 total, 8 up, 8 in 2026-04-15T14:13:58.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:58 vm04.local ceph-mon[53345]: from='client.14632 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:13:58.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:58 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:58.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:58 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:58.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:58 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:58.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:58 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:58.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:58 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-04-15T14:13:58.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:58 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-04-15T14:13:58.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:58 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-04-15T14:13:58.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:58 vm04.local ceph-mon[53345]: from='client.? 
' entity='client.rgw.foo.vm05.dhvjjs' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-04-15T14:13:58.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:58 vm04.local ceph-mon[53345]: osdmap e34: 8 total, 8 up, 8 in 2026-04-15T14:13:58.667 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:13:58.825 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:13:59.232 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:13:59.232 INFO:teuthology.orchestra.run.vm04.stdout:[service listing elided: identical to the previous output except the ingress.rgw.foo "service was created" event is now stamped 2026-04-15T14:13:59.147172Z; rgw.foo and ingress.rgw.foo still report "running": 0 of "size": 4] 2026-04-15T14:13:59.295 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: pgmap v59: 97 pgs: 16 creating+peering, 32 active+clean, 49 unknown; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 511 B/s wr, 3 op/s 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: osdmap e35: 8 total, 8 up, 8 in 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='client.? 192.168.123.105:0/3287815887' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='client.? ' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:59.614 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:13:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: pgmap v59: 97 pgs: 16 creating+peering, 32 active+clean, 49 unknown; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 511 B/s wr, 3 op/s 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: osdmap e35: 8 total, 8 up, 8 in 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/3287815887' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='client.? ' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:13:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:13:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:00.296 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: Deploying daemon haproxy.rgw.foo.vm04.lpycfq on vm04 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='client.14636 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='client.? ' entity='client.rgw.foo.vm05.dhvjjs' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: osdmap e36: 8 total, 8 up, 8 in 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='client.? ' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch 2026-04-15T14:14:00.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:00 vm05.local ceph-mon[57841]: from='client.? 192.168.123.105:0/3287815887' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch 2026-04-15T14:14:00.442 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:14:00.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: Deploying daemon haproxy.rgw.foo.vm04.lpycfq on vm04 2026-04-15T14:14:00.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='client.14636 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='client.? 
' entity='client.rgw.foo.vm05.dhvjjs' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: osdmap e36: 8 total, 8 up, 8 in 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='client.? ' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch 2026-04-15T14:14:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:00 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.105:0/3287815887' entity='client.rgw.foo.vm05.dhvjjs' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch 2026-04-15T14:14:00.852 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:14:00.852 INFO:teuthology.orchestra.run.vm04.stdout:[service listing elided: identical to the 14:13:59 output above; rgw.foo and ingress.rgw.foo still report "running": 0 of "size": 4] 2026-04-15T14:14:00.991 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:14:01.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:01 vm04.local ceph-mon[53345]: pgmap v62: 129 pgs: 12 creating+peering, 70 active+clean, 47 unknown; 578 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 1.5 KiB/s wr, 13 op/s 2026-04-15T14:14:01.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:01.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:01.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:01.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:01.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:01 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-04-15T14:14:01.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:01 vm04.local ceph-mon[53345]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-04-15T14:14:01.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:01 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-04-15T14:14:01.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:01 vm04.local ceph-mon[53345]: from='client.? 
' entity='client.rgw.foo.vm05.dhvjjs' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-04-15T14:14:01.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:01 vm04.local ceph-mon[53345]: osdmap e37: 8 total, 8 up, 8 in 2026-04-15T14:14:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:01 vm05.local ceph-mon[57841]: pgmap v62: 129 pgs: 12 creating+peering, 70 active+clean, 47 unknown; 578 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 1.5 KiB/s wr, 13 op/s 2026-04-15T14:14:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:01 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3796942599' entity='client.rgw.foo.vm04.owsxoy' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-04-15T14:14:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:01 vm05.local ceph-mon[57841]: from='client.? 192.168.123.105:0/4224359614' entity='client.rgw.foo.vm05.pzlhsk' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-04-15T14:14:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:01 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/394633107' entity='client.rgw.foo.vm04.lqvrsn' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-04-15T14:14:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:01 vm05.local ceph-mon[57841]: from='client.? 
' entity='client.rgw.foo.vm05.dhvjjs' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-04-15T14:14:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:01 vm05.local ceph-mon[57841]: osdmap e37: 8 total, 8 up, 8 in 2026-04-15T14:14:01.993 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:14:02.180 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:14:02.566 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:02 vm04.local ceph-mon[53345]: from='client.14640 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:14:02.676 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:14:02.676 INFO:teuthology.orchestra.run.vm04.stdout:[service listing elided: identical to the 14:13:59 output above; rgw.foo and ingress.rgw.foo still report "running": 0 of "size": 4] 2026-04-15T14:14:02.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:02 vm05.local ceph-mon[57841]: from='client.14640 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:14:02.783 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: pgmap v64: 129 pgs: 5 creating+peering, 116 active+clean, 8 unknown; 580 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 3.8 KiB/s wr, 55 op/s 2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: from='client.14644 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' 
2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:03 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:03.785 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json
2026-04-15T14:14:03.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:03 vm04.local ceph-mon[53345]: pgmap v64: 129 pgs: 5 creating+peering, 116 active+clean, 8 unknown; 580 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 3.8 KiB/s wr, 55 op/s
2026-04-15T14:14:03.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:03 vm04.local ceph-mon[53345]: from='client.14644 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:03.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:03 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:03.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:03 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:03.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:03 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:03.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:03 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:03.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:03 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:03.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:03 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:03.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:03 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:03.818 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:03 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:03.938 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:14:04.332 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:14:04.332 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:49.673502Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:48.921197Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:48.921259Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:49.673551Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:14:03.138669Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:13:48.921333Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:48.921366Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:48.921298Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:48.921399Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:49.673587Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:55.291070Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:55.285887Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T14:14:04.436 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T14:14:04.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:04 vm04.local ceph-mon[53345]: 12.12.1.104 is in 12.12.0.0/22 on vm04 interface eth0
2026-04-15T14:14:04.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:04 vm04.local ceph-mon[53345]: 12.12.1.104 is in 12.12.0.0/22 on vm05 interface eth0
2026-04-15T14:14:04.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:04 vm04.local ceph-mon[53345]: Deploying daemon keepalived.rgw.foo.vm04.uutjkf on vm04
2026-04-15T14:14:04.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:04 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:04.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:04 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:04.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:04 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:04.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:04 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:04.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:04 vm05.local ceph-mon[57841]: 12.12.1.104 is in 12.12.0.0/22 on vm04 interface eth0
2026-04-15T14:14:04.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:04 vm05.local ceph-mon[57841]: 12.12.1.104 is in 12.12.0.0/22 on vm05 interface eth0
2026-04-15T14:14:04.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:04 vm05.local ceph-mon[57841]: Deploying daemon keepalived.rgw.foo.vm04.uutjkf on vm04
2026-04-15T14:14:04.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:04 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:04.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:04 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:04.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:04 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
"client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:05.437 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:14:05.642 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:14:05.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:05 vm05.local ceph-mon[57841]: pgmap v65: 129 pgs: 129 active+clean; 584 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 254 KiB/s rd, 8.2 KiB/s wr, 466 op/s 2026-04-15T14:14:05.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:05 vm05.local ceph-mon[57841]: from='client.14648 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:14:05.705 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:05 vm04.local ceph-mon[53345]: pgmap v65: 129 pgs: 129 active+clean; 584 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 254 KiB/s rd, 8.2 KiB/s wr, 466 op/s 2026-04-15T14:14:05.705 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:05 vm04.local ceph-mon[53345]: from='client.14648 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:14:06.147 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:14:06.147 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:49.673502Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:48.921197Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:48.921259Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:49.673551Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:14:03.138669Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", 
"last_refresh": "2026-04-15T14:13:48.921333Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:48.921366Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:48.921298Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:48.921399Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:49.673587Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:55.291070Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:55.285887Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-15T14:14:06.208 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:14:06.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:06 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:06.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:06 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:06.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:06 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:06.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:06 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:06.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:06 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:06 vm04.local ceph-mon[53345]: from='mgr.14231 
2026-04-15T14:14:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:06 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:06 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:06 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:07.209 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json
2026-04-15T14:14:07.487 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:14:07.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:07 vm05.local ceph-mon[57841]: pgmap v66: 129 pgs: 129 active+clean; 586 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 281 KiB/s rd, 8.0 KiB/s wr, 517 op/s
2026-04-15T14:14:07.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:07 vm05.local ceph-mon[57841]: from='client.14652 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:07.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:07 vm04.local ceph-mon[53345]: pgmap v66: 129 pgs: 129 active+clean; 586 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 281 KiB/s rd, 8.0 KiB/s wr, 517 op/s
2026-04-15T14:14:07.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:07 vm04.local ceph-mon[53345]: from='client.14652 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:07.900 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:14:07.900 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:49.673502Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:48.921197Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:48.921259Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:49.673551Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:14:07.632806Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:13:48.921333Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:48.921366Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:48.921298Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:48.921399Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:49.673587Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:55.291070Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:55.285887Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T14:14:07.964 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T14:14:08.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:08 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:08.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:08 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:08.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:08 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:08.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:08 vm05.local ceph-mon[57841]: 12.12.1.104 is in 12.12.0.0/22 on vm05 interface eth0
2026-04-15T14:14:08.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:08 vm05.local ceph-mon[57841]: 12.12.1.104 is in 12.12.0.0/22 on vm04 interface eth0
2026-04-15T14:14:08.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:08 vm05.local ceph-mon[57841]: Deploying daemon keepalived.rgw.foo.vm05.txbkes on vm05
2026-04-15T14:14:08.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:08 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:08.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:08 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:08.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:08 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:08.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:08 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:08.964 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json
2026-04-15T14:14:08.992 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:08 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:08.993 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:08 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:08.993 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:08 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:08.993 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:08 vm04.local ceph-mon[53345]: 12.12.1.104 is in 12.12.0.0/22 on vm05 interface eth0
2026-04-15T14:14:08.993 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:08 vm04.local ceph-mon[53345]: 12.12.1.104 is in 12.12.0.0/22 on vm04 interface eth0
2026-04-15T14:14:08.993 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:08 vm04.local ceph-mon[53345]: Deploying daemon keepalived.rgw.foo.vm05.txbkes on vm05
2026-04-15T14:14:08.993 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:08 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:08.993 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:08 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:08.993 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:08 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:08.993 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:08 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:09.122 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:14:09.541 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:14:09.541 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:49.673502Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:48.921197Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:48.921259Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:49.673551Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:14:07.632806Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:13:48.921333Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:48.921366Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:48.921298Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:48.921399Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:49.673587Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:55.291070Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:55.285887Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T14:14:09.598 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T14:14:09.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:09 vm04.local ceph-mon[53345]: from='client.14656 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:09.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:09 vm04.local ceph-mon[53345]: pgmap v67: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 303 KiB/s rd, 7.8 KiB/s wr, 558 op/s
2026-04-15T14:14:09.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:09 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:09.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:09 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:09.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:09 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:09.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:09 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:09 vm05.local ceph-mon[57841]: from='client.14656 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:09 vm05.local ceph-mon[57841]: pgmap v67: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 303 KiB/s rd, 7.8 KiB/s wr, 558 op/s
2026-04-15T14:14:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:09 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
"rgw_frontends"} : dispatch 2026-04-15T14:14:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:09 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:09 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:10.598 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:14:10.743 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:14:11.098 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:10 vm05.local ceph-mon[57841]: from='client.14660 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:14:11.098 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:10 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:14:11.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:10 vm04.local ceph-mon[53345]: from='client.14660 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:14:11.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:10 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:14:11.153 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:14:11.153 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:49.673502Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:48.921197Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:48.921259Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:49.673551Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:14:07.632806Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 
9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:13:48.921333Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:48.921366Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:48.921298Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:48.921399Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:49.673587Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:55.291070Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:55.285887Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-15T14:14:11.239 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T14:14:12.065 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:11 vm05.local ceph-mon[57841]: pgmap v68: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 250 KiB/s rd, 6.4 KiB/s wr, 460 op/s 2026-04-15T14:14:12.065 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:11 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:12.065 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:11 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:12.065 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:11 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:14:12.065 
2026-04-15T14:14:12.065 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:11 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:12.071 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:11 vm04.local ceph-mon[53345]: pgmap v68: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 250 KiB/s rd, 6.4 KiB/s wr, 460 op/s
2026-04-15T14:14:12.071 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:11 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:12.071 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:11 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:12.071 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:11 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:12.071 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:11 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:12.239 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json
2026-04-15T14:14:12.390 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:14:12.814 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-15T14:14:12.814 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:13:49.673502Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:13:48.921197Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:13:48.921259Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:13:49.673551Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:14:11.780458Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:13:48.921333Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:13:48.921366Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:13:48.921298Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:13:48.921399Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:13:49.673587Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:55.291070Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:55.285887Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T14:14:12.815 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:12 vm04.local ceph-mon[53345]: from='client.14664 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:12.815 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:12 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:12.815 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:12 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:12.815 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:12 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:12.815 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:12 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:12.815 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:12 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:14:12.884 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T14:14:13.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:12 vm05.local ceph-mon[57841]: from='client.14664 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:13.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:12 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:13.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:12 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:13.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:12 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:13.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:12 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:13.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:12 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:14:13.885 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json
2026-04-15T14:14:14.036 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:14:14.061 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: pgmap v69: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 224 KiB/s rd, 5.7 KiB/s wr, 413 op/s
2026-04-15T14:14:14.061 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:14.061 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:14.061 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:14.061 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:14.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:14.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:14.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:14.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:14.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:14:14.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:14:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:13 vm05.local ceph-mon[57841]: pgmap v69: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 224 KiB/s rd, 5.7 KiB/s wr, 413 op/s
2026-04-15T14:14:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:13 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:13 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:13 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:13 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:13 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:13 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:13 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:13 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:13 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
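The "config generate-minimal-conf" and "auth get client.admin" dispatches are the mgr refreshing the minimal ceph.conf and admin keyring that cephadm typically keeps distributed under /etc/ceph on managed hosts. A sketch of fetching the same two artifacts by hand through the cephadm shell, reusing the wrapper constants from the earlier sketch (the helper name ceph is ours):

    import subprocess

    CEPHADM = "/home/ubuntu/cephtest/cephadm"
    IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5"
    FSID = "d89dc7c6-38d4-11f1-aa58-cd98464f39ae"

    def ceph(*args):
        """Run one ceph CLI command inside the cephadm shell container."""
        return subprocess.check_output([
            "sudo", CEPHADM, "--image", IMAGE, "shell",
            "-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring",
            "--fsid", FSID, "--", "ceph", *args,
        ]).decode()

    minimal_conf = ceph("config", "generate-minimal-conf")   # mon list + fsid only
    admin_keyring = ceph("auth", "get", "client.admin")      # keyring as shown in the log's dispatch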
"client.admin"} : dispatch 2026-04-15T14:14:14.451 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:14:14.452 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:14:13.572782Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:14:12.895103Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:14:12.895177Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:14:13.572845Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:14:11.780458Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "last_refresh": "2026-04-15T14:14:12.895531Z", "ports": [9000, 9001], "running": 4, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:14:12.895248Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:14:12.895281Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:14:12.895214Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:14:12.895313Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": 
"prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "last_refresh": "2026-04-15T14:14:13.572901Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:13:55.291070Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:55.285887Z", "last_refresh": "2026-04-15T14:14:12.895443Z", "ports": [8000, 8001], "running": 4, "size": 4}}] 2026-04-15T14:14:14.516 INFO:tasks.cephadm:rgw.foo has 4/4 2026-04-15T14:14:14.516 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-04-15T14:14:14.518 INFO:tasks.cephadm:Waiting for ceph service ingress.rgw.foo to start (timeout 300)... 2026-04-15T14:14:14.518 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph orch ls -f json 2026-04-15T14:14:14.672 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:14:15.091 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-15T14:14:15.091 INFO:teuthology.orchestra.run.vm04.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T14:11:50.614666Z", "last_refresh": "2026-04-15T14:14:13.572782Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:12:49.203440Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T14:11:48.942248Z", "last_refresh": "2026-04-15T14:14:12.895103Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:49.949157Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T14:11:48.499114Z", "last_refresh": "2026-04-15T14:14:12.895177Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T14:11:49.787904Z", "last_refresh": "2026-04-15T14:14:13.572845Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T14:14:11.780458Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.104/22"}, "status": {"created": "2026-04-15T14:13:48.721209Z", "last_refresh": "2026-04-15T14:14:12.895531Z", "ports": [9000, 9001], "running": 4, "size": 4, "virtual_ip": "12.12.1.104/22"}}, {"events": ["2026-04-15T14:12:53.511392Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T14:11:48.079897Z", "last_refresh": "2026-04-15T14:14:12.895248Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:54.612187Z service:mon 
[INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm04:192.168.123.104=vm04", "vm05:192.168.123.105=vm05"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T14:12:14.225736Z", "last_refresh": "2026-04-15T14:14:12.895281Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T14:12:52.753539Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T14:11:50.209840Z", "last_refresh": "2026-04-15T14:14:12.895214Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T14:13:04.879191Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T14:13:04.869796Z", "last_refresh": "2026-04-15T14:14:12.895313Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T14:12:54.616280Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T14:11:49.357873Z", "ports": [9095], "running": 0, "size": 1}}, {"events": ["2026-04-15T14:13:55.291070Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T14:13:55.285887Z", "last_refresh": "2026-04-15T14:14:12.895443Z", "ports": [8000, 8001], "running": 4, "size": 4}}] 2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: from='client.14668 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: Checking dashboard <-> RGW credentials 2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:14:15.106 
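The two ceph orch ls -f json dumps above are the polling side of cephadm.wait_for_service: teuthology re-runs the command until the target service reports as many running daemons as its placement asks for (rgw.foo has 4/4 above, then the same check for ingress.rgw.foo). A minimal stand-alone sketch of that success condition, assuming jq is available on the host (the suite itself performs this check in teuthology's Python, not in shell):

    # Sketch only: poll until a service reports running == size, with the
    # same 300 s budget the task uses. Service name here is an example.
    svc=ingress.rgw.foo
    timeout 300 bash -c "
      until ceph orch ls --service-name $svc -f json \
            | jq -e '.[0].status | .running == .size' >/dev/null; do
        sleep 5
      done"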
2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:15.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:15.151 INFO:tasks.cephadm:ingress.rgw.foo has 4/4
2026-04-15T14:14:15.151 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-04-15T14:14:15.154 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm04.local
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'echo "Check while healthy..."
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> curl http://12.12.1.104:9000/
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:>
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> # stop each rgw in turn
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> echo "Check with each rgw stopped in turn..."
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> for rgw in `ceph orch ps | grep ^rgw.foo. | awk '"'"'{print $1}'"'"'`; do
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> ceph orch daemon stop $rgw
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo '"'"'Waiting for $rgw to stop'"'"'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> timeout 300 bash -c "while ! curl http://12.12.1.104:9000/ ; do echo '"'"'Waiting for http://12.12.1.104:9000/ to be available'"'"'; sleep 1 ; done"
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> ceph orch daemon start $rgw
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo '"'"'Waiting for $rgw to start'"'"'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> done
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:>
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> # stop each haproxy in turn
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> echo "Check with each haproxy down in turn..."
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '"'"'{print $1}'"'"'`; do
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> ceph orch daemon stop $haproxy
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep stopped; do echo '"'"'Waiting for $haproxy to stop'"'"'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> timeout 300 bash -c "while ! curl http://12.12.1.104:9000/ ; do echo '"'"'Waiting for http://12.12.1.104:9000/ to be available'"'"'; sleep 1 ; done"
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> ceph orch daemon start $haproxy
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo '"'"'Waiting for $haproxy to start'"'"'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
2026-04-15T14:14:15.154 DEBUG:teuthology.orchestra.run.vm04:> done
2026-04-15T14:14:15.155 DEBUG:teuthology.orchestra.run.vm04:>
2026-04-15T14:14:15.155 DEBUG:teuthology.orchestra.run.vm04:> timeout 300 bash -c "while ! curl http://12.12.1.104:9000/ ; do echo '"'"'Waiting for http://12.12.1.104:9000/ to be available'"'"'; sleep 1 ; done"'
2026-04-15T14:14:15.304 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config
2026-04-15T14:14:15.319 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='client.14668 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:15.319 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: Checking dashboard <-> RGW credentials
2026-04-15T14:14:15.319 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:15.319 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:15.320 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:15.320 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:14:15.320 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-15T14:14:15.320 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:14:15.320 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:15.320 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
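For readability, here is the bash -c payload from the DEBUG lines above reassembled as a script; the '"'"' runs are the usual sh-within-sh re-quoting and collapse back to plain single quotes (this is a transcription of what the log shows, not new test code):

    echo "Check while healthy..."
    curl http://12.12.1.104:9000/

    # stop each rgw in turn
    echo "Check with each rgw stopped in turn..."
    for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
      ceph orch daemon stop $rgw
      timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo 'Waiting for $rgw to stop'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
      timeout 300 bash -c "while ! curl http://12.12.1.104:9000/ ; do echo 'Waiting for http://12.12.1.104:9000/ to be available'; sleep 1 ; done"
      ceph orch daemon start $rgw
      timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo 'Waiting for $rgw to start'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
    done

    # stop each haproxy in turn
    echo "Check with each haproxy down in turn..."
    for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
      ceph orch daemon stop $haproxy
      timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep stopped; do echo 'Waiting for $haproxy to stop'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
      timeout 300 bash -c "while ! curl http://12.12.1.104:9000/ ; do echo 'Waiting for http://12.12.1.104:9000/ to be available'; sleep 1 ; done"
      ceph orch daemon start $haproxy
      timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo 'Waiting for $haproxy to start'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
    done

    timeout 300 bash -c "while ! curl http://12.12.1.104:9000/ ; do echo 'Waiting for http://12.12.1.104:9000/ to be available'; sleep 1 ; done"

Note that $rgw and $haproxy sit inside double quotes, so the outer for-loop shell expands the daemon name before each inner timeout ... bash -c poll runs.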
2026-04-15T14:14:15.320 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:15.320 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:15.392 INFO:teuthology.orchestra.run.vm04.stdout:Check while healthy...
2026-04-15T14:14:15.394 INFO:teuthology.orchestra.run.vm04.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T14:14:15.394 INFO:teuthology.orchestra.run.vm04.stderr: Dload Upload Total Spent Left Speed
2026-04-15T14:14:15.395 INFO:teuthology.orchestra.run.vm04.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T14:14:15.396 INFO:teuthology.orchestra.run.vm04.stdout:anonymousCheck with each rgw stopped in turn...
2026-04-15T14:14:15.952 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled to stop rgw.foo.vm04.lqvrsn on host 'vm04'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: Reconfiguring prometheus.vm04 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm04', 'ceph-exporter.vm05', 'mgr.vm04.ycniad', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm04', 'ceph-exporter.vm05', 'ingress', 'mgr.vm04.ycniad', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ingress'})
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: pgmap v70: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 204 KiB/s rd, 4.4 KiB/s wr, 372 op/s
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: Reconfiguring daemon prometheus.vm04 on vm04
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='client.14708 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:14:16.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='client.14712 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:16.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:16.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:16.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:16.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:16.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='client.14716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:16.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.202 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:14:16.216 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: Reconfiguring prometheus.vm04 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm04', 'ceph-exporter.vm05', 'mgr.vm04.ycniad', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm04', 'ceph-exporter.vm05', 'ingress', 'mgr.vm04.ycniad', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ingress'})
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: pgmap v70: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 204 KiB/s rd, 4.4 KiB/s wr, 372 op/s
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: Reconfiguring daemon prometheus.vm04 on vm04
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='client.14708 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
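Aside, visible in both mons' copies of the audit trail above: creating ingress.rgw.foo changed the dependency set cephadm computes for the prometheus daemon (diff {'ingress'}), so the orchestrator reconfigures prometheus.vm04 to pick up the new haproxy endpoints; that in-flight reconfigure is the likely reason the second orch ls dump briefly showed prometheus with running 0 and no last_refresh. The same regeneration can be requested by hand through the orchestrator; a sketch, not something this task runs:

    # ask cephadm to regenerate the config of the prometheus service's daemons
    ceph orch reconfig prometheus
    # and watch the daemon come back
    ceph orch ps --daemon-type prometheus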
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='client.14712 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='client.14716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.217 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:16.417 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:14:16.417 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (21s) 2s ago 21s 104M - 20.2.0-19-g7ec4401a095 259950fb12cb 16381c88c865
2026-04-15T14:14:16.417 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (22s) 2s ago 22s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:14:16.417 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (22s) 0s ago 22s 102M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:14:16.417 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (23s) 0s ago 23s 104M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:14:16.688 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: from='client.14720 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.lqvrsn", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: Schedule stop daemon rgw.foo.vm04.lqvrsn
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: pgmap v71: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 90 KiB/s rd, 1.7 KiB/s wr, 163 op/s
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: from='client.14724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: from='client.14728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1293007126' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
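The stop loop's state checks above are plain-text scrapes: grep $rgw | grep stopped over the orch ps table, with the full table and ceph health detail echoed on every miss, which is what produces the repeating NAME/HOST blocks and HEALTH_OK lines that follow. Where column output is awkward to parse, the same state is available structurally; a sketch assuming the daemon_type/daemon_id/status_desc fields of cephadm's JSON output:

    # machine-readable equivalent of the table being grepped above
    ceph orch ps --daemon-type rgw -f json \
      | jq -r '.[] | "\(.daemon_type).\(.daemon_id)\t\(.status_desc)"'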
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:14:17.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:17 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:14:17.589 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: from='client.14720 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.lqvrsn", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: Schedule stop daemon rgw.foo.vm04.lqvrsn
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: pgmap v71: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 90 KiB/s rd, 1.7 KiB/s wr, 163 op/s
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: from='client.14724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: from='client.14728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1293007126' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:14:17.590 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:17 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:14:18.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:18 vm05.local ceph-mon[57841]: Checking dashboard <-> RGW credentials
2026-04-15T14:14:18.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:18 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:18.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:18 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:14:18.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:18 vm04.local ceph-mon[53345]: Checking dashboard <-> RGW credentials
2026-04-15T14:14:18.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:18 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:18.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:18 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:14:19.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:19 vm05.local ceph-mon[57841]: pgmap v72: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 767 B/s wr, 88 op/s
2026-04-15T14:14:19.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:19 vm04.local ceph-mon[53345]: pgmap v72: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 767 B/s wr, 88 op/s
2026-04-15T14:14:21.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:21 vm05.local ceph-mon[57841]: pgmap v73: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 341 B/s wr, 30 op/s
2026-04-15T14:14:21.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:21 vm04.local ceph-mon[53345]: pgmap v73: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 341 B/s wr, 30 op/s
2026-04-15T14:14:21.909 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:14:22.106 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:14:22.106 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (26s) 4s ago 26s 104M - 20.2.0-19-g7ec4401a095 259950fb12cb 16381c88c865
2026-04-15T14:14:22.106 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (28s) 4s ago 28s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:14:22.106 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (27s) 5s ago 27s 102M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:14:22.106 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (29s) 5s ago 29s 104M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:14:22.346 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:14:22.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:22 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3803570671' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:23.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:22 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3803570671' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:23.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:23 vm05.local ceph-mon[57841]: from='client.14760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:23.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:23 vm05.local ceph-mon[57841]: pgmap v74: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 341 B/s wr, 35 op/s
2026-04-15T14:14:23.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:23 vm05.local ceph-mon[57841]: from='client.14764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:24.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:23 vm04.local ceph-mon[53345]: from='client.14760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:24.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:23 vm04.local ceph-mon[53345]: pgmap v74: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 341 B/s wr, 35 op/s
2026-04-15T14:14:24.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:23 vm04.local ceph-mon[53345]: from='client.14764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:25.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:25 vm05.local ceph-mon[57841]: pgmap v75: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 341 B/s wr, 35 op/s
2026-04-15T14:14:26.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:25 vm04.local ceph-mon[53345]: pgmap v75: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 341 B/s wr, 35 op/s
2026-04-15T14:14:27.572 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:14:27.764 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:14:27.764 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (32s) 10s ago 32s 104M - 20.2.0-19-g7ec4401a095 259950fb12cb 16381c88c865
2026-04-15T14:14:27.764 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (34s) 10s ago 34s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:14:27.764 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (33s) 11s ago 33s 102M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:14:27.764 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (35s) 11s ago 35s 104M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:14:27.826 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:27 vm04.local ceph-mon[53345]: pgmap v76: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 341 B/s wr, 31 op/s
2026-04-15T14:14:27.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:27 vm05.local ceph-mon[57841]: pgmap v76: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 341 B/s wr, 31 op/s
2026-04-15T14:14:28.044 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:14:28.831 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:28 vm04.local ceph-mon[53345]: from='client.14772 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:28.832 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:28 vm04.local ceph-mon[53345]: from='client.14776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:28.832 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:28 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:28.832 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:28 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:28.832 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:28 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:28.832 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:28 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:28.832 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:28 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch
2026-04-15T14:14:28.832 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:28 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:14:28.832 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:28 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4162713939' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:28.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:28 vm05.local ceph-mon[57841]: from='client.14772 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:28.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:28 vm05.local ceph-mon[57841]: from='client.14776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:28.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:28 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:28.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:28 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:28.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:28 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:28.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:28 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:28.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:28 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch
2026-04-15T14:14:28.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:28 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:14:28.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:28 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4162713939' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:29.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:29 vm04.local ceph-mon[53345]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-04-15T14:14:29.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:29 vm04.local ceph-mon[53345]: pgmap v77: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 341 B/s wr, 27 op/s
2026-04-15T14:14:29.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:29.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:29.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:14:29.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:29.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:29 vm05.local ceph-mon[57841]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-04-15T14:14:29.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:29 vm05.local ceph-mon[57841]: pgmap v77: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 341 B/s wr, 27 op/s
2026-04-15T14:14:29.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:29.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:29.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:14:29.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:30.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:30 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:30.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:30 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:14:30.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:30 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:14:30.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:30 vm05.local ceph-mon[57841]: pgmap v78: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 176 B/s wr, 21 op/s
2026-04-15T14:14:30.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:30 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:30.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:30 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:14:31.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:30 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:31.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:30 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:14:31.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:30 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:14:31.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:30 vm04.local ceph-mon[53345]: pgmap v78: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 176 B/s wr, 21 op/s
2026-04-15T14:14:31.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:30 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:31.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:30 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:14:32.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:31 vm04.local ceph-mon[53345]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-15T14:14:32.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:31 vm05.local ceph-mon[57841]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-15T14:14:33.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:32 vm04.local ceph-mon[53345]: pgmap v79: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 6 op/s
2026-04-15T14:14:33.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:32 vm05.local ceph-mon[57841]: pgmap v79: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 6 op/s
2026-04-15T14:14:33.292 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:14:33.482 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:14:33.482 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 3s ago 38s - -
2026-04-15T14:14:33.482 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (40s) 3s ago 39s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:14:33.482 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (39s) 4s ago 39s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:14:33.482 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (40s) 4s ago 40s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:14:33.724 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:14:33.724 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:14:33.725 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:14:35.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:34 vm04.local ceph-mon[53345]: from='client.14784 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:35.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:34 vm04.local ceph-mon[53345]: from='client.14788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:35.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:34 vm04.local ceph-mon[53345]: pgmap v80: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 264 B/s rd, 529 B/s wr, 0 op/s
2026-04-15T14:14:35.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:34 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2706776029' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:35.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:34 vm05.local ceph-mon[57841]: from='client.14784 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:35.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:34 vm05.local ceph-mon[57841]: from='client.14788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:35.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:34 vm05.local ceph-mon[57841]: pgmap v80: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 264 B/s rd, 529 B/s wr, 0 op/s
2026-04-15T14:14:35.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:34 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2706776029' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:37.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:36 vm04.local ceph-mon[53345]: pgmap v81: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 264 B/s rd, 529 B/s wr, 0 op/s
2026-04-15T14:14:37.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:36 vm05.local ceph-mon[57841]: pgmap v81: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 264 B/s rd, 529 B/s wr, 0 op/s
2026-04-15T14:14:38.942 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:14:39.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:38 vm04.local ceph-mon[53345]: pgmap v82: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 264 B/s rd, 529 B/s wr, 0 op/s
2026-04-15T14:14:39.128 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:14:39.129 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 9s ago 43s - -
2026-04-15T14:14:39.129 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (45s) 9s ago 45s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:14:39.129 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (44s) 10s ago 44s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:14:39.129 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (46s) 10s ago 46s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:14:39.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:38 vm05.local ceph-mon[57841]: pgmap v82: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 264 B/s rd, 529 B/s wr, 0 op/s
2026-04-15T14:14:39.359 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:14:39.359 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:14:39.359 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:14:40.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:39 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/340861685' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
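From 14:14:31 on, the wait is wedged on a string match: ceph orch daemon stop left rgw.foo.vm04.lqvrsn in STATUS error rather than stopped (which is what raises the CEPHADM_FAILED_DAEMON warning each iteration prints), but the loop greps for the literal stopped, so unless the daemon later settles into that state the inner timeout 300 is what will end the wait. A more tolerant condition, purely as a sketch and not what this suite runs, would accept either terminal state:

    # sketch: treat 'error' as well as 'stopped' as "daemon is down"
    timeout 300 bash -c '
      until ceph orch ps --daemon-type rgw | grep rgw.foo.vm04.lqvrsn | grep -Eq "stopped|error"; do
        echo waiting; sleep 5
      done'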
2026-04-15T14:14:40.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:39 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/340861685' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:41.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:40 vm04.local ceph-mon[53345]: from='client.14796 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:41.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:40 vm04.local ceph-mon[53345]: from='client.14800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:41.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:40 vm04.local ceph-mon[53345]: pgmap v83: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 264 B/s rd, 529 B/s wr, 0 op/s
2026-04-15T14:14:41.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:40 vm05.local ceph-mon[57841]: from='client.14796 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:41.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:40 vm05.local ceph-mon[57841]: from='client.14800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:41.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:40 vm05.local ceph-mon[57841]: pgmap v83: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 264 B/s rd, 529 B/s wr, 0 op/s
2026-04-15T14:14:42.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:41 vm04.local ceph-mon[53345]: pgmap v84: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:14:42.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:41 vm05.local ceph-mon[57841]: pgmap v84: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:14:44.581 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:14:44.778 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:14:44.778 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 15s ago 49s - -
2026-04-15T14:14:44.778 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (51s) 15s ago 51s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:14:44.778 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (50s) 15s ago 50s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:14:44.778 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (52s) 15s ago 52s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:14:44.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:44 vm04.local ceph-mon[53345]: pgmap v85: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:14:44.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:44.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:14:45.044 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:14:45.045 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:14:45.045 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:14:45.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:44 vm05.local ceph-mon[57841]: pgmap v85: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:14:45.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:14:45.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:14:46.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:45 vm04.local ceph-mon[53345]: from='client.14808 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:46.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:45 vm04.local ceph-mon[53345]: from='client.14812 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:46.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:45 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1616662285' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:46.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:45 vm05.local ceph-mon[57841]: from='client.14808 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:46.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:45 vm05.local ceph-mon[57841]: from='client.14812 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:14:46.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:45 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1616662285' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:47.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:46 vm04.local ceph-mon[53345]: pgmap v86: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:14:47.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:46 vm05.local ceph-mon[57841]: pgmap v86: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:14:49.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:48 vm04.local ceph-mon[53345]: pgmap v87: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:14:49.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:48 vm05.local ceph-mon[57841]: pgmap v87: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:14:50.270 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:14:50.448 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:14:50.448 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 20s ago 55s - -
2026-04-15T14:14:50.448 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (57s) 20s ago 56s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:14:50.448 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (56s) 21s ago 56s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:14:50.448 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (57s) 21s ago 57s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:14:50.676 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:14:50.676 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:14:50.676 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:14:51.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:50 vm04.local ceph-mon[53345]: pgmap v88: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:14:51.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:50 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2803727639' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:14:51.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:50 vm05.local ceph-mon[57841]: pgmap v88: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:14:51.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:50 vm05.local ceph-mon[57841]: from='client.?
192.168.123.104:0/2803727639' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:14:52.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:51 vm04.local ceph-mon[53345]: from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:14:52.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:51 vm04.local ceph-mon[53345]: from='client.14824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:14:52.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:51 vm05.local ceph-mon[57841]: from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:14:52.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:51 vm05.local ceph-mon[57841]: from='client.14824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:14:53.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:52 vm04.local ceph-mon[53345]: pgmap v89: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:14:53.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:52 vm05.local ceph-mon[57841]: pgmap v89: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:14:55.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:54 vm04.local ceph-mon[53345]: pgmap v90: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:14:55.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:54 vm05.local ceph-mon[57841]: pgmap v90: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:14:55.901 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:14:56.086 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:14:56.086 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 26s ago 60s - - 2026-04-15T14:14:56.086 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (62s) 26s ago 62s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:14:56.086 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (61s) 27s ago 61s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:14:56.086 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (63s) 27s ago 63s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:14:56.086 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:55 vm04.local ceph-mon[53345]: pgmap v91: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:14:56.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:55 vm05.local ceph-mon[57841]: pgmap v91: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:14:56.323 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:14:56.323 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:14:56.323 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:14:57.119 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:56 vm04.local ceph-mon[53345]: from='client.14832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:14:57.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:56 vm04.local ceph-mon[53345]: from='client.14836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:14:57.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:56 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3526560049' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:14:57.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:56 vm05.local ceph-mon[57841]: from='client.14832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:14:57.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:56 vm05.local ceph-mon[57841]: from='client.14836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:14:57.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:56 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3526560049' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:14:58.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:57 vm04.local ceph-mon[53345]: pgmap v92: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:14:58.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:57 vm05.local ceph-mon[57841]: pgmap v92: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:14:59.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:14:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:14:59.722 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:14:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:15:00.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:00 vm05.local ceph-mon[57841]: pgmap v93: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:00.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:00 vm04.local ceph-mon[53345]: pgmap v93: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:01.549 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:15:01.742 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:15:01.742 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 32s ago 66s - - 2026-04-15T14:15:01.742 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (68s) 32s ago 68s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:15:01.742 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (67s) 32s ago 67s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:15:01.742 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (69s) 32s ago 69s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 
5bfb90e1bb45 2026-04-15T14:15:01.976 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:15:01.976 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:15:01.976 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:15:03.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:02 vm04.local ceph-mon[53345]: from='client.14844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:03.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:02 vm04.local ceph-mon[53345]: pgmap v94: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:03.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:02 vm04.local ceph-mon[53345]: from='client.14848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:03.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:02 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3874637920' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:03.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:02 vm05.local ceph-mon[57841]: from='client.14844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:03.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:02 vm05.local ceph-mon[57841]: pgmap v94: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:03.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:02 vm05.local ceph-mon[57841]: from='client.14848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:03.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:02 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/3874637920' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:05.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:04 vm04.local ceph-mon[53345]: pgmap v95: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:05.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:04 vm05.local ceph-mon[57841]: pgmap v95: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:07.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:06 vm04.local ceph-mon[53345]: pgmap v96: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:07.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:06 vm05.local ceph-mon[57841]: pgmap v96: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:07.211 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:15:07.406 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:15:07.406 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 37s ago 72s - - 2026-04-15T14:15:07.406 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (73s) 37s ago 73s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:15:07.406 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (73s) 38s ago 73s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:15:07.406 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (74s) 38s ago 74s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:15:07.642 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:15:07.642 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:15:07.642 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:15:08.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:07 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/799877038' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:08.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:07 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/799877038' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:09.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:08 vm04.local ceph-mon[53345]: from='client.14856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:09.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:08 vm04.local ceph-mon[53345]: from='client.14860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:09.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:08 vm04.local ceph-mon[53345]: pgmap v97: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:09.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:08 vm05.local ceph-mon[57841]: from='client.14856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:09.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:08 vm05.local ceph-mon[57841]: from='client.14860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:09.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:08 vm05.local ceph-mon[57841]: pgmap v97: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:11.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:10 vm04.local ceph-mon[53345]: pgmap v98: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:11.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:10 vm05.local ceph-mon[57841]: pgmap v98: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:12.863 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:15:13.063 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:12 vm04.local ceph-mon[53345]: pgmap v99: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:13.063 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:15:13.063 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 43s ago 77s - - 2026-04-15T14:15:13.063 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (79s) 43s ago 79s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:15:13.063 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (78s) 44s ago 78s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:15:13.063 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (80s) 44s ago 80s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:15:13.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:12 vm05.local ceph-mon[57841]: pgmap v99: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:13.301 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:15:13.301 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm 
daemon(s) 2026-04-15T14:15:13.301 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:15:14.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:13 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2578468419' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:14.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:13 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2578468419' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:15.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:14 vm04.local ceph-mon[53345]: from='client.14868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:15.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:14 vm04.local ceph-mon[53345]: from='client.14872 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:15.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:14 vm04.local ceph-mon[53345]: pgmap v100: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:15.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:15:15.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:14 vm05.local ceph-mon[57841]: from='client.14868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:15.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:14 vm05.local ceph-mon[57841]: from='client.14872 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:15.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:14 vm05.local ceph-mon[57841]: pgmap v100: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:15.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:15:16.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:15 vm05.local ceph-mon[57841]: pgmap v101: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:16.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:15 vm04.local ceph-mon[53345]: pgmap v101: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:18.513 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:15:18.704 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:15:18.704 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 49s ago 83s - - 2026-04-15T14:15:18.704 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (85s) 49s ago 85s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:15:18.704 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (84s) 49s ago 84s 
103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:15:18.704 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (86s) 49s ago 86s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:15:18.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:18 vm04.local ceph-mon[53345]: pgmap v102: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:18.946 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:15:18.946 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:15:18.946 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:15:19.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:18 vm05.local ceph-mon[57841]: pgmap v102: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:20.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:19 vm04.local ceph-mon[53345]: from='client.14880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:20.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:19 vm04.local ceph-mon[53345]: from='client.14884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:20.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:19 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2556081325' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:20.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:19 vm05.local ceph-mon[57841]: from='client.14880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:20.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:19 vm05.local ceph-mon[57841]: from='client.14884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:20.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:19 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/2556081325' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:21.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:20 vm04.local ceph-mon[53345]: pgmap v103: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:21.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:20 vm05.local ceph-mon[57841]: pgmap v103: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:23.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:22 vm04.local ceph-mon[53345]: pgmap v104: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:23.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:22 vm05.local ceph-mon[57841]: pgmap v104: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:24.163 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:15:24.357 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:15:24.357 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 54s ago 89s - - 2026-04-15T14:15:24.357 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (90s) 54s ago 90s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:15:24.357 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (90s) 55s ago 89s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:15:24.357 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (91s) 55s ago 91s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:15:24.588 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:15:24.589 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:15:24.589 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:15:24.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:24 vm04.local ceph-mon[53345]: pgmap v105: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:24.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:24 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3189697469' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:25.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:24 vm05.local ceph-mon[57841]: pgmap v105: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:25.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:24 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/3189697469' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:26.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:25 vm04.local ceph-mon[53345]: from='client.14892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:26.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:25 vm04.local ceph-mon[53345]: from='client.14896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:26.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:25 vm05.local ceph-mon[57841]: from='client.14892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:26.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:25 vm05.local ceph-mon[57841]: from='client.14896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:27.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:26 vm04.local ceph-mon[53345]: pgmap v106: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:27.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:26 vm05.local ceph-mon[57841]: pgmap v106: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:28.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:27 vm04.local ceph-mon[53345]: pgmap v107: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:28.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:27 vm05.local ceph-mon[57841]: pgmap v107: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:29.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:15:29.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:15:29.838 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:15:30.037 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:15:30.038 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 60s ago 94s - - 2026-04-15T14:15:30.038 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (96s) 60s ago 96s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:15:30.038 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (95s) 61s ago 95s 103M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:15:30.038 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (97s) 61s ago 97s 105M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:15:30.328 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:15:30.328 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:15:30.328 INFO:teuthology.orchestra.run.vm04.stdout: daemon 
rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:15:30.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:30 vm04.local ceph-mon[53345]: pgmap v108: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:30.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:30 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:15:30.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:30 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3939419713' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:30.689 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:30 vm05.local ceph-mon[57841]: pgmap v108: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:30.689 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:30 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:15:30.689 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:30 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3939419713' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:31.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:31 vm04.local ceph-mon[53345]: from='client.14904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:31.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:31 vm04.local ceph-mon[53345]: from='client.14908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:31.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:31 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:15:31.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:31 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:15:31.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:31 vm05.local ceph-mon[57841]: from='client.14904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:31.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:31 vm05.local ceph-mon[57841]: from='client.14908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:31.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:31 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:15:31.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:31 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:15:32.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:32 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:15:32.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:32 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:15:32.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:32 vm04.local ceph-mon[53345]: pgmap v109: 129 pgs: 129 active+clean; 587 
KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:32.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:32 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:15:32.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:32 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:15:32.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:32 vm04.local ceph-mon[53345]: pgmap v110: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:32.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:32 vm04.local ceph-mon[53345]: pgmap v111: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:32.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:32 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:15:32.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:32 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:15:32.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:32 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:15:32.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:32 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:15:32.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:32 vm05.local ceph-mon[57841]: pgmap v109: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:32.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:32 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:15:32.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:32 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:15:32.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:32 vm05.local ceph-mon[57841]: pgmap v110: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:32.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:32 vm05.local ceph-mon[57841]: pgmap v111: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:32.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:32 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:15:32.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:32 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:15:34.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:33 vm04.local ceph-mon[53345]: pgmap v112: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 381 B/s rd, 762 B/s wr, 1 op/s 2026-04-15T14:15:34.190 
INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:33 vm05.local ceph-mon[57841]: pgmap v112: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 381 B/s rd, 762 B/s wr, 1 op/s 2026-04-15T14:15:35.558 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:15:35.747 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:15:35.747 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 4s ago 100s - - 2026-04-15T14:15:35.747 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (102s) 4s ago 102s 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:15:35.747 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (101s) 4s ago 101s 108M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:15:35.747 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (103s) 4s ago 103s 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:15:35.988 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:15:35.988 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:15:35.988 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:15:36.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:35 vm04.local ceph-mon[53345]: from='client.14916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:36.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:35 vm04.local ceph-mon[53345]: from='client.14920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:36.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:35 vm04.local ceph-mon[53345]: pgmap v113: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 381 B/s rd, 762 B/s wr, 1 op/s 2026-04-15T14:15:36.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:35 vm05.local ceph-mon[57841]: from='client.14916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:36.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:35 vm05.local ceph-mon[57841]: from='client.14920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:36.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:35 vm05.local ceph-mon[57841]: pgmap v113: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 381 B/s rd, 762 B/s wr, 1 op/s 2026-04-15T14:15:37.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:36 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/284636105' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:37.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:36 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/284636105' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:38.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:38 vm04.local ceph-mon[53345]: pgmap v114: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 381 B/s rd, 762 B/s wr, 1 op/s 2026-04-15T14:15:38.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:38 vm05.local ceph-mon[57841]: pgmap v114: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 381 B/s rd, 762 B/s wr, 1 op/s 2026-04-15T14:15:40.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:39 vm04.local ceph-mon[53345]: pgmap v115: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 381 B/s rd, 762 B/s wr, 1 op/s 2026-04-15T14:15:40.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:39 vm05.local ceph-mon[57841]: pgmap v115: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 381 B/s rd, 762 B/s wr, 1 op/s 2026-04-15T14:15:41.210 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:15:41.403 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:15:41.404 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 9s ago 106s - - 2026-04-15T14:15:41.404 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (107s) 9s ago 107s 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:15:41.404 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (107s) 10s ago 107s 108M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:15:41.404 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (108s) 10s ago 108s 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:15:41.662 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:15:41.663 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:15:41.663 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:15:42.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:41 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3127993970' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:42.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:41 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/3127993970' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:43.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:42 vm04.local ceph-mon[53345]: from='client.14928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:43.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:42 vm04.local ceph-mon[53345]: from='client.14932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:43.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:42 vm04.local ceph-mon[53345]: pgmap v116: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 614 B/s wr, 0 op/s 2026-04-15T14:15:43.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:42 vm05.local ceph-mon[57841]: from='client.14928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:43.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:42 vm05.local ceph-mon[57841]: from='client.14932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:43.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:42 vm05.local ceph-mon[57841]: pgmap v116: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 614 B/s wr, 0 op/s 2026-04-15T14:15:44.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:43 vm04.local ceph-mon[53345]: pgmap v117: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:44.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:43 vm05.local ceph-mon[57841]: pgmap v117: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:15:45.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:15:45.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:15:46.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:46 vm04.local ceph-mon[53345]: pgmap v118: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:46.440 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:46 vm05.local ceph-mon[57841]: pgmap v118: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:46.869 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:15:47.061 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:15:47.061 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 15s ago 111s - - 2026-04-15T14:15:47.061 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (113s) 15s ago 113s 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:15:47.061 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (112s) 16s ago 112s 108M - 
20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:15:47.061 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (114s) 16s ago 114s 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:15:47.307 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:15:47.307 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:15:47.307 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:15:47.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:47 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1191997999' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:47.690 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:47 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1191997999' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:15:48.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:48 vm04.local ceph-mon[53345]: from='client.14940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:48.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:48 vm04.local ceph-mon[53345]: from='client.14944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:48.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:48 vm04.local ceph-mon[53345]: pgmap v119: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:48.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:48 vm05.local ceph-mon[57841]: from='client.14940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:48.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:48 vm05.local ceph-mon[57841]: from='client.14944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:15:48.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:48 vm05.local ceph-mon[57841]: pgmap v119: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:50.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:49 vm04.local ceph-mon[53345]: pgmap v120: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:50.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:49 vm05.local ceph-mon[57841]: pgmap v120: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:52.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:51 vm04.local ceph-mon[53345]: pgmap v121: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:52.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:51 vm05.local ceph-mon[57841]: pgmap v121: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:15:52.535 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:15:52.725 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:15:52.725 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 21s ago 117s - - 
2026-04-15T14:15:52.725 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (119s) 21s ago 119s 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:15:52.725 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (118s) 21s ago 118s 108M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:15:52.725 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 21s ago 2m 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:15:52.861 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:52 vm04.local ceph-mon[53345]: from='client.14952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:15:52.862 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:52 vm04.local ceph-mon[53345]: from='client.14956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:15:52.958 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:15:52.958 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:15:52.958 INFO:teuthology.orchestra.run.vm04.stdout:    daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:15:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:52 vm05.local ceph-mon[57841]: from='client.14952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:15:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:52 vm05.local ceph-mon[57841]: from='client.14956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:15:54.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:53 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3855340945' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:15:54.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:53 vm04.local ceph-mon[53345]: pgmap v122: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:15:54.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:53 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3855340945' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:15:54.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:53 vm05.local ceph-mon[57841]: pgmap v122: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:15:56.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:55 vm04.local ceph-mon[53345]: pgmap v123: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:15:56.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:55 vm05.local ceph-mon[57841]: pgmap v123: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:15:58.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:57 vm04.local ceph-mon[53345]: pgmap v124: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:15:58.177 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:15:58.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:57 vm05.local ceph-mon[57841]: pgmap v124: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:15:58.361 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:15:58.361 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 26s ago 2m - -
2026-04-15T14:15:58.361 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 26s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:15:58.361 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 27s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:15:58.361 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 27s ago 2m 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:15:58.610 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:15:58.610 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:15:58.610 INFO:teuthology.orchestra.run.vm04.stdout:    daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:15:58.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:58 vm04.local ceph-mon[53345]: from='client.14964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:15:58.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:58 vm04.local ceph-mon[53345]: from='client.14968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:15:58.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:58 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3714364750' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:15:59.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:58 vm05.local ceph-mon[57841]: from='client.14964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:15:59.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:58 vm05.local ceph-mon[57841]: from='client.14968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:15:59.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:58 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3714364750' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:00.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:16:00.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:15:59 vm04.local ceph-mon[53345]: pgmap v125: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:00.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:16:00.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:15:59 vm05.local ceph-mon[57841]: pgmap v125: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:02.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:01 vm05.local ceph-mon[57841]: pgmap v126: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:02.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:01 vm04.local ceph-mon[53345]: pgmap v126: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:03.834 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:16:04.031 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:16:04.031 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 32s ago 2m - -
2026-04-15T14:16:04.031 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 32s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:16:04.031 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 33s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:16:04.031 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 33s ago 2m 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:16:04.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:03 vm04.local ceph-mon[53345]: pgmap v127: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:16:04.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:03 vm05.local ceph-mon[57841]: pgmap v127: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:16:04.273 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:16:04.273 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:16:04.273 INFO:teuthology.orchestra.run.vm04.stdout:    daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:16:05.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:04 vm04.local ceph-mon[53345]: from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:05.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:04 vm04.local ceph-mon[53345]: from='client.14980 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:05.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:04 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/878349537' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:05.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:04 vm05.local ceph-mon[57841]: from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:05.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:04 vm05.local ceph-mon[57841]: from='client.14980 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:05.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:04 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/878349537' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:06.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:05 vm05.local ceph-mon[57841]: pgmap v128: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:16:06.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:05 vm04.local ceph-mon[53345]: pgmap v128: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:16:08.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:07 vm04.local ceph-mon[53345]: pgmap v129: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:16:08.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:07 vm05.local ceph-mon[57841]: pgmap v129: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:16:09.480 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:16:09.662 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:16:09.662 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 38s ago 2m - -
2026-04-15T14:16:09.662 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 38s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:16:09.662 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 38s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:16:09.662 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 38s ago 2m 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:16:09.905
INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:16:09.905 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:16:09.905 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:16:10.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:09 vm04.local ceph-mon[53345]: from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:10.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:09 vm04.local ceph-mon[53345]: from='client.14992 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:10.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:09 vm04.local ceph-mon[53345]: pgmap v130: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:16:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:09 vm05.local ceph-mon[57841]: from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:09 vm05.local ceph-mon[57841]: from='client.14992 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:09 vm05.local ceph-mon[57841]: pgmap v130: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:16:11.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:10 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1040811167' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:16:11.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:10 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/1040811167' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:16:12.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:11 vm04.local ceph-mon[53345]: pgmap v131: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:16:12.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:11 vm05.local ceph-mon[57841]: pgmap v131: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:16:14.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:13 vm04.local ceph-mon[53345]: pgmap v132: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:16:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:13 vm05.local ceph-mon[57841]: pgmap v132: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:16:15.122 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:16:15.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:16:15.314 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:16:15.314 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:16:15.314 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 43s ago 2m - - 2026-04-15T14:16:15.315 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 43s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:16:15.315 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 44s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:16:15.315 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 44s ago 2m 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:16:15.582 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:16:15.582 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:16:15.582 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:16:16.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:15 vm05.local ceph-mon[57841]: from='client.15000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:16.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:15 vm05.local ceph-mon[57841]: from='client.15004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:16.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:15 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/2890428458' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:16:16.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:15 vm05.local ceph-mon[57841]: pgmap v133: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:16.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:15 vm04.local ceph-mon[53345]: from='client.15000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:16.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:15 vm04.local ceph-mon[53345]: from='client.15004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:16.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:15 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2890428458' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:16:16.370 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:15 vm04.local ceph-mon[53345]: pgmap v133: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:18.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:17 vm04.local ceph-mon[53345]: pgmap v134: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:18.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:17 vm05.local ceph-mon[57841]: pgmap v134: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:20.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:19 vm04.local ceph-mon[53345]: pgmap v135: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:20.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:19 vm05.local ceph-mon[57841]: pgmap v135: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:20.795 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:16:20.990 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:16:20.990 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 49s ago 2m - - 2026-04-15T14:16:20.990 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 49s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:16:20.990 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 50s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:16:20.990 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 50s ago 2m 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:16:21.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:20 vm04.local ceph-mon[53345]: from='client.15012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:21.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:20 vm05.local ceph-mon[57841]: from='client.15012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:21.231 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:16:21.231 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 
failed cephadm daemon(s) 2026-04-15T14:16:21.231 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:16:22.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:21 vm04.local ceph-mon[53345]: from='client.15016 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:22.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:21 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2641130021' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:16:22.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:21 vm04.local ceph-mon[53345]: pgmap v136: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:22.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:21 vm05.local ceph-mon[57841]: from='client.15016 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:16:22.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:21 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2641130021' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:16:22.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:21 vm05.local ceph-mon[57841]: pgmap v136: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:24.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:23 vm04.local ceph-mon[53345]: pgmap v137: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:24.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:23 vm05.local ceph-mon[57841]: pgmap v137: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:26.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:25 vm04.local ceph-mon[53345]: pgmap v138: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:26.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:25 vm05.local ceph-mon[57841]: pgmap v138: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:16:26.457 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:16:26.655 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:16:26.655 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 55s ago 2m - - 2026-04-15T14:16:26.655 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 55s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:16:26.655 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 55s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:16:26.655 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 55s ago 2m 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:16:26.920 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:16:26.920 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:16:26.921 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 
2026-04-15T14:16:27.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:26 vm04.local ceph-mon[53345]: from='client.15024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:27.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:26 vm04.local ceph-mon[53345]: from='client.15028 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:27.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:26 vm05.local ceph-mon[57841]: from='client.15024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:27.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:26 vm05.local ceph-mon[57841]: from='client.15028 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:28.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:27 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3679543457' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:28.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:27 vm04.local ceph-mon[53345]: pgmap v139: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:28.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:27 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3679543457' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:28.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:27 vm05.local ceph-mon[57841]: pgmap v139: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:29.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:16:29.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:16:30.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:30 vm05.local ceph-mon[57841]: pgmap v140: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:30.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:30 vm04.local ceph-mon[53345]: pgmap v140: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:32.113 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:31 vm05.local ceph-mon[57841]: pgmap v141: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:32.113 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:31 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:16:32.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:31 vm04.local ceph-mon[53345]: pgmap v141: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:32.119 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:31 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:16:32.163 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:16:32.356 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:16:32.356 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 60s ago 2m - -
2026-04-15T14:16:32.356 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 60s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:16:32.356 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 61s ago 2m 108M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:16:32.356 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 61s ago 2m 110M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:16:32.620 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:16:32.621 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:16:32.621 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:16:32.847 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:32 vm05.local ceph-mon[57841]: from='client.15036 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:32.848 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:32 vm05.local ceph-mon[57841]: from='client.15040 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:32.848 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:32 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1847275680' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:32.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:32 vm04.local ceph-mon[53345]: from='client.15036 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:32.870 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:32 vm04.local ceph-mon[53345]: from='client.15040 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:32.870 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:32 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1847275680' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:34.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:16:34.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:16:34.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:34 vm05.local ceph-mon[57841]: pgmap v142: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:16:34.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:16:34.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:16:34.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:16:34.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:16:34.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:16:34.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:16:34.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:16:34.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:16:34.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:34 vm04.local ceph-mon[53345]: pgmap v142: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:16:34.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:16:34.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:16:34.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:16:34.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:16:34.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:16:34.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:16:35.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:35 vm04.local ceph-mon[53345]: pgmap v143: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 297 B/s rd, 594 B/s wr, 0 op/s
2026-04-15T14:16:35.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:35 vm04.local ceph-mon[53345]: pgmap v144: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 368 B/s rd, 737 B/s wr, 1 op/s
2026-04-15T14:16:35.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:35 vm05.local ceph-mon[57841]: pgmap v143: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 297 B/s rd, 594 B/s wr, 0 op/s
2026-04-15T14:16:35.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:35 vm05.local ceph-mon[57841]: pgmap v144: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 368 B/s rd, 737 B/s wr, 1 op/s
2026-04-15T14:16:37.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:37 vm04.local ceph-mon[53345]: pgmap v145: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 368 B/s rd, 737 B/s wr, 1 op/s
2026-04-15T14:16:37.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:37 vm05.local ceph-mon[57841]: pgmap v145: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 368 B/s rd, 737 B/s wr, 1 op/s
2026-04-15T14:16:37.848 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:16:38.032 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:16:38.032 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 4s ago 2m - -
2026-04-15T14:16:38.032 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 4s ago 2m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:16:38.032 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 4s ago 2m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:16:38.032 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 4s ago 2m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:16:38.265 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:16:38.265 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:16:38.265 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:16:39.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:39 vm04.local ceph-mon[53345]: from='client.15048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:39.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:39 vm04.local ceph-mon[53345]: from='client.15052 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:39.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:39 vm04.local ceph-mon[53345]: pgmap v146: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 368 B/s rd, 737 B/s wr, 1 op/s
2026-04-15T14:16:39.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:39 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1815118809' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:39.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:39 vm05.local ceph-mon[57841]: from='client.15048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:39.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:39 vm05.local ceph-mon[57841]: from='client.15052 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:39.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:39 vm05.local ceph-mon[57841]: pgmap v146: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 368 B/s rd, 737 B/s wr, 1 op/s
2026-04-15T14:16:39.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:39 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1815118809' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:41.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:41 vm04.local ceph-mon[53345]: pgmap v147: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 368 B/s rd, 737 B/s wr, 1 op/s
2026-04-15T14:16:41.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:41 vm05.local ceph-mon[57841]: pgmap v147: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 368 B/s rd, 737 B/s wr, 1 op/s
2026-04-15T14:16:43.500 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:16:43.500 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:43 vm04.local ceph-mon[53345]: pgmap v148: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:43.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:43 vm05.local ceph-mon[57841]: pgmap v148: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:43.693 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:16:43.693 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 9s ago 2m - -
2026-04-15T14:16:43.693 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 9s ago 2m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:16:43.693 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 10s ago 2m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:16:43.693 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 10s ago 2m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:16:43.941 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:16:43.941 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:16:43.941 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:16:44.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:44 vm04.local ceph-mon[53345]: from='client.15060 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:44.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:44 vm04.local ceph-mon[53345]: from='client.15064 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:44.620 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:44 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2516371023' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:44.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:44 vm05.local ceph-mon[57841]: from='client.15060 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:44.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:44 vm05.local ceph-mon[57841]: from='client.15064 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:44.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:44 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2516371023' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:45.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:45 vm04.local ceph-mon[53345]: pgmap v149: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:45.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:45 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:16:45.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:45 vm05.local ceph-mon[57841]: pgmap v149: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:45.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:45 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:16:47.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:47 vm04.local ceph-mon[53345]: pgmap v150: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:47.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:47 vm05.local ceph-mon[57841]: pgmap v150: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:49.156 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:16:49.340 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:16:49.340 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 15s ago 2m - -
2026-04-15T14:16:49.341 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 15s ago 2m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:16:49.341 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 16s ago 2m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:16:49.341 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (2m) 16s ago 2m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:16:49.578 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:16:49.578 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:16:49.578 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:16:49.578 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:49 vm04.local ceph-mon[53345]: pgmap v151: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:49.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:49 vm05.local ceph-mon[57841]: pgmap v151: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:50.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:50 vm04.local ceph-mon[53345]: from='client.15072 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:50.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:50 vm04.local ceph-mon[53345]: from='client.15076 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:50.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:50 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1758272160' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:50.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:50 vm05.local ceph-mon[57841]: from='client.15072 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:50.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:50 vm05.local ceph-mon[57841]: from='client.15076 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:50.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:50 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1758272160' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:51.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:51 vm04.local ceph-mon[53345]: pgmap v152: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:51.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:51 vm05.local ceph-mon[57841]: pgmap v152: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:53.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:53 vm04.local ceph-mon[53345]: pgmap v153: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:53.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:53 vm05.local ceph-mon[57841]: pgmap v153: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:54.786 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:16:54.969 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:16:54.969 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 21s ago 2m - -
2026-04-15T14:16:54.969 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 21s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:16:54.969 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 21s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:16:54.969 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 21s ago 3m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:16:55.205 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:16:55.205 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:16:55.205 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:16:55.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:55 vm04.local ceph-mon[53345]: pgmap v154: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:55.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:55 vm04.local ceph-mon[53345]: from='client.15084 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:55.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:55 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2422450553' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:55.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:55 vm05.local ceph-mon[57841]: pgmap v154: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:55.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:55 vm05.local ceph-mon[57841]: from='client.15084 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:55.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:55 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2422450553' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:16:56.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:56 vm04.local ceph-mon[53345]: from='client.15088 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:56.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:56 vm05.local ceph-mon[57841]: from='client.15088 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:16:57.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:57 vm04.local ceph-mon[53345]: pgmap v155: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:57.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:57 vm05.local ceph-mon[57841]: pgmap v155: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:59.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:16:59 vm04.local ceph-mon[53345]: pgmap v156: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:16:59.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:16:59 vm05.local ceph-mon[57841]: pgmap v156: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:00.415 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:17:00.601 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:17:00.601 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 26s ago 3m - -
2026-04-15T14:17:00.601 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 26s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:17:00.601 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 27s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:17:00.601 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 27s ago 3m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:17:00.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:17:00.834 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:17:00.834 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:17:00.834 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:17:00.835 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:17:01.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:01 vm05.local ceph-mon[57841]: pgmap v157: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:01.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:01 vm05.local ceph-mon[57841]: from='client.15096 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:01.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:01 vm05.local ceph-mon[57841]: from='client.15100 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:01.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:01 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1566314380' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:17:01.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:01 vm04.local ceph-mon[53345]: pgmap v157: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:01.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:01 vm04.local ceph-mon[53345]: from='client.15096 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:01.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:01 vm04.local ceph-mon[53345]: from='client.15100 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:01.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:01 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1566314380' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:17:03.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:03 vm05.local ceph-mon[57841]: pgmap v158: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:03.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:03 vm04.local ceph-mon[53345]: pgmap v158: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:05.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:05 vm05.local ceph-mon[57841]: pgmap v159: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:05.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:05 vm04.local ceph-mon[53345]: pgmap v159: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:06.061 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:17:06.260 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:17:06.260 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 32s ago 3m - -
2026-04-15T14:17:06.260 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 32s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:17:06.260 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 33s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:17:06.260 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 33s ago 3m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:17:06.499 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:17:06.499 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:17:06.499 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:17:07.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:07 vm05.local ceph-mon[57841]: from='client.15108 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:07.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:07 vm05.local ceph-mon[57841]: pgmap v160: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:07.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:07 vm05.local ceph-mon[57841]: from='client.15112 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:07.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:07 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2922183707' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:17:07.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:07 vm04.local ceph-mon[53345]: from='client.15108 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:07.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:07 vm04.local ceph-mon[53345]: pgmap v160: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:07.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:07 vm04.local ceph-mon[53345]: from='client.15112 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:07.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:07 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2922183707' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:17:09.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:09 vm05.local ceph-mon[57841]: pgmap v161: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:09.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:09 vm04.local ceph-mon[53345]: pgmap v161: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:11.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:11 vm05.local ceph-mon[57841]: pgmap v162: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:11.724 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:17:11.724 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:11 vm04.local ceph-mon[53345]: pgmap v162: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:11.917 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:17:11.917 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 38s ago 3m - -
2026-04-15T14:17:11.917 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 38s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:17:11.917 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 38s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:17:11.917 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 38s ago 3m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:17:12.163 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:17:12.163 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:17:12.163 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:17:12.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:12 vm04.local ceph-mon[53345]: from='client.15120 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:12.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:12 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/243025781' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:17:12.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:12 vm05.local ceph-mon[57841]: from='client.15120 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:12.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:12 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/243025781' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:17:13.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:13 vm04.local ceph-mon[53345]: from='client.15124 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:13.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:13 vm04.local ceph-mon[53345]: pgmap v163: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:13.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:13 vm05.local ceph-mon[57841]: from='client.15124 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:13.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:13 vm05.local ceph-mon[57841]: pgmap v163: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:14.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:17:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:17:15.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:15 vm04.local ceph-mon[53345]: pgmap v164: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:15.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:15 vm05.local ceph-mon[57841]: pgmap v164: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:17:17.396 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:17:17.590 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:17:17.590 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 43s ago 3m - -
2026-04-15T14:17:17.590 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 43s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:17:17.590 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 44s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:17:17.590 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 44s ago 3m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:17:17.858 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:17:17.858 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:17:17.858 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:17:17.859 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:17 vm04.local ceph-mon[53345]: pgmap v165: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:17.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:17 vm05.local ceph-mon[57841]: pgmap v165: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:18.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:18 vm04.local ceph-mon[53345]: from='client.15132 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:18.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:18 vm04.local ceph-mon[53345]: from='client.15136 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:18.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:18 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2180116653' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:17:18.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:18 vm05.local ceph-mon[57841]: from='client.15132 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:18.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:18 vm05.local ceph-mon[57841]: from='client.15136 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:18.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:18 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2180116653' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:17:19.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:19 vm04.local ceph-mon[53345]: pgmap v166: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:19.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:19 vm05.local ceph-mon[57841]: pgmap v166: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:21.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:21 vm04.local ceph-mon[53345]: pgmap v167: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:21.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:21 vm05.local ceph-mon[57841]: pgmap v167: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:23.085 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:17:23.285 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:17:23.285 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 49s ago 3m - -
2026-04-15T14:17:23.285 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 49s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:17:23.285 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 50s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:17:23.285 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 50s ago 3m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:17:23.563 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:17:23.563 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:17:23.563 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:17:23.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:23 vm04.local ceph-mon[53345]: pgmap v168: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:23.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:23 vm05.local ceph-mon[57841]: pgmap v168: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:17:24.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:24 vm04.local ceph-mon[53345]: from='client.15144 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:24.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:24 vm04.local ceph-mon[53345]: from='client.15148 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:17:24.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:24 vm04.local ceph-mon[53345]: from='client.?
192.168.123.104:0/4125819141' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:24.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:24 vm05.local ceph-mon[57841]: from='client.15144 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:24.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:24 vm05.local ceph-mon[57841]: from='client.15148 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:24.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:24 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4125819141' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:25.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:25 vm04.local ceph-mon[53345]: pgmap v169: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:25.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:25 vm05.local ceph-mon[57841]: pgmap v169: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:27.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:27 vm04.local ceph-mon[53345]: pgmap v170: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:27.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:27 vm05.local ceph-mon[57841]: pgmap v170: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:28.786 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:17:28.973 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:17:28.973 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 55s ago 3m - - 2026-04-15T14:17:28.973 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 55s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:17:28.973 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 55s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:17:28.973 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 55s ago 3m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:17:29.203 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:17:29.203 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:17:29.203 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:17:29.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:29 vm04.local ceph-mon[53345]: pgmap v171: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:29.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:29 vm04.local ceph-mon[53345]: from='client.15156 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:29.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:29 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/341730447' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:29.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:17:29.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:29 vm05.local ceph-mon[57841]: pgmap v171: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:29.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:29 vm05.local ceph-mon[57841]: from='client.15156 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:29.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:29 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/341730447' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:29.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:17:30.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:30 vm04.local ceph-mon[53345]: from='client.15160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:30.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:30 vm05.local ceph-mon[57841]: from='client.15160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:31.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:31 vm04.local ceph-mon[53345]: pgmap v172: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:31.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:31 vm05.local ceph-mon[57841]: pgmap v172: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:33.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:33 vm04.local ceph-mon[53345]: pgmap v173: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:33.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:33 vm05.local ceph-mon[57841]: pgmap v173: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:34.437 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:17:34.635 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:17:34.635 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 60s ago 3m - - 2026-04-15T14:17:34.635 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 60s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:17:34.636 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 61s ago 3m 113M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:17:34.636 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 61s ago 3m 115M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:17:34.651 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:34 vm05.local ceph-mon[57841]: 
from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:17:34.788 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:17:34.911 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:17:34.911 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:17:34.911 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:17:35.539 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:35 vm04.local ceph-mon[53345]: pgmap v174: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:17:35.539 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:35 vm04.local ceph-mon[53345]: from='client.15168 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:35.539 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:35 vm04.local ceph-mon[53345]: from='client.15172 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:35.539 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:35 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2132211351' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:35.539 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:35 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:17:35.539 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:35 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:17:35.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:35 vm05.local ceph-mon[57841]: pgmap v174: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:17:35.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:35 vm05.local ceph-mon[57841]: from='client.15168 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:35.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:35 vm05.local ceph-mon[57841]: from='client.15172 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:35.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:35 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/2132211351' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:35.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:35 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:17:35.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:35 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:17:37.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:17:37.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:17:37.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:37 vm04.local ceph-mon[53345]: pgmap v175: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:17:37.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:17:37.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:17:37.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:37 vm04.local ceph-mon[53345]: pgmap v176: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 301 B/s rd, 602 B/s wr, 0 op/s 2026-04-15T14:17:37.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:37 vm04.local ceph-mon[53345]: pgmap v177: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 374 B/s rd, 749 B/s wr, 1 op/s 2026-04-15T14:17:37.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:17:37.369 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:17:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:17:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:17:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:37 vm05.local ceph-mon[57841]: pgmap v175: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:17:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:17:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 
2026-04-15T14:17:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:37 vm05.local ceph-mon[57841]: pgmap v176: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 301 B/s rd, 602 B/s wr, 0 op/s 2026-04-15T14:17:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:37 vm05.local ceph-mon[57841]: pgmap v177: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 374 B/s rd, 749 B/s wr, 1 op/s 2026-04-15T14:17:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:17:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:17:39.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:39 vm04.local ceph-mon[53345]: pgmap v178: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 374 B/s rd, 749 B/s wr, 1 op/s 2026-04-15T14:17:39.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:39 vm05.local ceph-mon[57841]: pgmap v178: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 374 B/s rd, 749 B/s wr, 1 op/s 2026-04-15T14:17:40.145 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:17:40.360 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:17:40.360 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 4s ago 3m - - 2026-04-15T14:17:40.360 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 4s ago 3m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:17:40.360 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 5s ago 3m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:17:40.360 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 5s ago 3m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:17:40.621 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:17:40.621 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:17:40.621 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:17:41.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:41 vm04.local ceph-mon[53345]: from='client.15180 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:41.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:41 vm04.local ceph-mon[53345]: pgmap v179: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 374 B/s rd, 749 B/s wr, 1 op/s 2026-04-15T14:17:41.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:41 vm04.local ceph-mon[53345]: from='client.15184 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:41.619 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:41 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/3581344477' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:41.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:41 vm05.local ceph-mon[57841]: from='client.15180 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:41.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:41 vm05.local ceph-mon[57841]: pgmap v179: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 374 B/s rd, 749 B/s wr, 1 op/s 2026-04-15T14:17:41.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:41 vm05.local ceph-mon[57841]: from='client.15184 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:41.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:41 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3581344477' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:43.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:43 vm05.local ceph-mon[57841]: pgmap v180: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:43.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:43 vm04.local ceph-mon[53345]: pgmap v180: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:44.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:17:44.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:17:45.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:45 vm05.local ceph-mon[57841]: pgmap v181: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:45.839 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:17:45.839 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:45 vm04.local ceph-mon[53345]: pgmap v181: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:46.025 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:17:46.025 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 9s ago 3m - - 2026-04-15T14:17:46.025 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 9s ago 3m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:17:46.025 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 10s ago 3m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:17:46.025 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 10s ago 3m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:17:46.265 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:17:46.265 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:17:46.265 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn 
on vm04 is in error state 2026-04-15T14:17:46.621 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:46 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3787564125' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:46.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:46 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3787564125' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:47.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:47 vm05.local ceph-mon[57841]: from='client.15192 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:47.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:47 vm05.local ceph-mon[57841]: from='client.15196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:47.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:47 vm05.local ceph-mon[57841]: pgmap v182: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:47.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:47 vm04.local ceph-mon[53345]: from='client.15192 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:47.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:47 vm04.local ceph-mon[53345]: from='client.15196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:47.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:47 vm04.local ceph-mon[53345]: pgmap v182: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:49.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:49 vm05.local ceph-mon[57841]: pgmap v183: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:49.722 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:49 vm04.local ceph-mon[53345]: pgmap v183: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:51.486 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:17:51.692 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:51 vm04.local ceph-mon[53345]: pgmap v184: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:51.692 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:17:51.692 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 15s ago 3m - - 2026-04-15T14:17:51.692 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 15s ago 3m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:17:51.692 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 16s ago 3m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:17:51.692 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (3m) 16s ago 3m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:17:51.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:51 vm05.local ceph-mon[57841]: pgmap v184: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:51.943 
INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:17:51.943 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:17:51.943 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:17:52.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:52 vm05.local ceph-mon[57841]: from='client.15204 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:52.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:52 vm05.local ceph-mon[57841]: from='client.15208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:52.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:52 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3320521601' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:52.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:52 vm04.local ceph-mon[53345]: from='client.15204 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:52.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:52 vm04.local ceph-mon[53345]: from='client.15208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:52.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:52 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3320521601' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:53.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:53 vm05.local ceph-mon[57841]: pgmap v185: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:53.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:53 vm04.local ceph-mon[53345]: pgmap v185: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:55.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:55 vm05.local ceph-mon[57841]: pgmap v186: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:55.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:55 vm04.local ceph-mon[53345]: pgmap v186: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:57.164 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:17:57.371 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:17:57.371 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 21s ago 4m - - 2026-04-15T14:17:57.371 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 21s ago 4m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:17:57.371 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 22s ago 4m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:17:57.371 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (4m) 22s ago 4m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:17:57.440 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:57 vm04.local ceph-mon[53345]: pgmap v187: 129 pgs: 
129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:57.615 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:17:57.616 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:17:57.616 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:17:57.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:57 vm05.local ceph-mon[57841]: pgmap v187: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:58.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:58 vm04.local ceph-mon[53345]: from='client.15216 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:58.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:58 vm04.local ceph-mon[53345]: from='client.15220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:58.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:58 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3421392389' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:58.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:58 vm05.local ceph-mon[57841]: from='client.15216 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:58.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:58 vm05.local ceph-mon[57841]: from='client.15220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:17:58.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:58 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/3421392389' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:17:59.722 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:59 vm04.local ceph-mon[53345]: pgmap v188: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:59.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:17:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:17:59.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:59 vm05.local ceph-mon[57841]: pgmap v188: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:17:59.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:17:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:18:01.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:01 vm04.local ceph-mon[53345]: pgmap v189: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:01.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:01 vm05.local ceph-mon[57841]: pgmap v189: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:02.833 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:18:03.017 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:18:03.017 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 26s ago 4m - - 2026-04-15T14:18:03.017 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 26s ago 4m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:18:03.017 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 27s ago 4m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:18:03.017 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (4m) 27s ago 4m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:18:03.261 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:18:03.262 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:18:03.262 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:18:03.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:03 vm04.local ceph-mon[53345]: pgmap v190: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:03.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:03 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1281524664' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:18:03.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:03 vm05.local ceph-mon[57841]: pgmap v190: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:03.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:03 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/1281524664' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:18:04.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:04 vm04.local ceph-mon[53345]: from='client.15228 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:04.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:04 vm04.local ceph-mon[53345]: from='client.24757 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:04.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:04 vm05.local ceph-mon[57841]: from='client.15228 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:04.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:04 vm05.local ceph-mon[57841]: from='client.24757 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:05.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:05 vm04.local ceph-mon[53345]: pgmap v191: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:05.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:05 vm05.local ceph-mon[57841]: pgmap v191: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:07.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:07 vm04.local ceph-mon[53345]: pgmap v192: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:07 vm05.local ceph-mon[57841]: pgmap v192: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:08.491 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:18:08.681 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:18:08.681 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 32s ago 4m - - 2026-04-15T14:18:08.681 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 32s ago 4m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:18:08.681 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 33s ago 4m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:18:08.681 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (4m) 33s ago 4m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:18:08.938 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:18:08.939 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:18:08.939 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:18:09.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:09 vm04.local ceph-mon[53345]: pgmap v193: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:09.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:09 vm04.local 
ceph-mon[53345]: from='client.15240 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:09.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:09 vm04.local ceph-mon[53345]: from='client.15244 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:09.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:09 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/485934870' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:18:09.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:09 vm05.local ceph-mon[57841]: pgmap v193: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:09.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:09 vm05.local ceph-mon[57841]: from='client.15240 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:09.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:09 vm05.local ceph-mon[57841]: from='client.15244 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:09.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:09 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/485934870' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:18:11.743 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:11 vm05.local ceph-mon[57841]: pgmap v194: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:11.845 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:11 vm04.local ceph-mon[53345]: pgmap v194: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:13.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:13 vm04.local ceph-mon[53345]: pgmap v195: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:13.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:13 vm05.local ceph-mon[57841]: pgmap v195: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:14.167 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:18:14.362 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:18:14.362 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 38s ago 4m - - 2026-04-15T14:18:14.362 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 38s ago 4m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:18:14.362 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 39s ago 4m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:18:14.362 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (4m) 39s ago 4m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:18:14.637 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:18:14.637 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] 
CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:18:14.637 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:18:14.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:18:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:18:15.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:15 vm04.local ceph-mon[53345]: from='client.15252 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:15.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:15 vm04.local ceph-mon[53345]: pgmap v196: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:15.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:15 vm04.local ceph-mon[53345]: from='client.15256 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:15.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:15 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2018294326' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:18:15.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:15 vm05.local ceph-mon[57841]: from='client.15252 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:15.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:15 vm05.local ceph-mon[57841]: pgmap v196: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:18:15.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:15 vm05.local ceph-mon[57841]: from='client.15256 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:15.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:15 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/2018294326' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:18:17.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:17 vm04.local ceph-mon[53345]: pgmap v197: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:17.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:17 vm05.local ceph-mon[57841]: pgmap v197: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:19.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:19 vm04.local ceph-mon[53345]: pgmap v198: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:19.886 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:18:19.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:19 vm05.local ceph-mon[57841]: pgmap v198: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:20.096 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:18:20.096 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 44s ago 4m - - 2026-04-15T14:18:20.096 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 44s ago 4m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:18:20.096 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 44s ago 4m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:18:20.096 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (4m) 44s ago 4m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:18:20.370 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:18:20.370 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:18:20.370 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:18:20.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:20 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2584325577' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:18:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:20 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/2584325577' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:18:21.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:21 vm04.local ceph-mon[53345]: from='client.15264 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:21.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:21 vm04.local ceph-mon[53345]: from='client.15268 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:21.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:21 vm04.local ceph-mon[53345]: pgmap v199: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:21.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:21 vm05.local ceph-mon[57841]: from='client.15264 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:21.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:21 vm05.local ceph-mon[57841]: from='client.15268 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:18:21.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:21 vm05.local ceph-mon[57841]: pgmap v199: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:23.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:23 vm05.local ceph-mon[57841]: pgmap v200: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:24.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:23 vm04.local ceph-mon[53345]: pgmap v200: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:25.616 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop 2026-04-15T14:18:25.831 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:18:25.839 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 49s ago 4m - - 2026-04-15T14:18:25.839 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 49s ago 4m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:18:25.839 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 50s ago 4m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:18:25.839 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (4m) 50s ago 4m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:18:25.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:25 vm04.local ceph-mon[53345]: pgmap v201: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:26.094 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:18:26.094 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:18:26.094 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state 2026-04-15T14:18:26.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:25 vm05.local ceph-mon[57841]: pgmap v201: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:18:27.118 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:26 vm04.local ceph-mon[53345]: from='client.15276 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:27.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:26 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2213920480' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:27.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:26 vm05.local ceph-mon[57841]: from='client.15276 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:27.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:26 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2213920480' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:28.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:27 vm04.local ceph-mon[53345]: from='client.15280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:28.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:27 vm04.local ceph-mon[53345]: pgmap v202: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:28.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:27 vm05.local ceph-mon[57841]: from='client.15280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:28.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:27 vm05.local ceph-mon[57841]: pgmap v202: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:29.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:28 vm05.local ceph-mon[57841]: pgmap v203: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:29.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:28 vm04.local ceph-mon[53345]: pgmap v203: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:18:30.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:18:31.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:30 vm05.local ceph-mon[57841]: pgmap v204: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:31.322 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:18:31.322 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:30 vm04.local ceph-mon[53345]: pgmap v204: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:31.523 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:18:31.523 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 55s ago 4m - -
2026-04-15T14:18:31.523 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 55s ago 4m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:18:31.523 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 56s ago 4m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:18:31.523 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (4m) 56s ago 4m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:18:31.777 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:18:31.777 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:18:31.777 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:18:32.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:31 vm04.local ceph-mon[53345]: from='client.15288 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:32.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:31 vm04.local ceph-mon[53345]: from='client.15292 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:32.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:31 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1825590640' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:32.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:31 vm05.local ceph-mon[57841]: from='client.15288 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:32.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:31 vm05.local ceph-mon[57841]: from='client.15292 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:32.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:31 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1825590640' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
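Each "Waiting for rgw.foo.vm04.lqvrsn to stop" line above is one pass of the test's wait loop: the daemon has been asked to stop, but cephadm keeps reporting its STATUS as "error" rather than "stopped", so the loop keeps polling and the CEPHADM_FAILED_DAEMON warning persists. The three admin commands issued per pass are visible in the mon audit entries; a minimal sketch of their CLI form (matching the audit JSON, not a literal quote of the test script):

    ceph orch ps                     # full daemon table (NAME HOST PORTS STATUS ...)
    ceph orch ps --daemon-type rgw   # the rgw-only table echoed to stdout above
    ceph health detail               # expands CEPHADM_FAILED_DAEMON to the failing daemon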
2026-04-15T14:18:33.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:32 vm05.local ceph-mon[57841]: pgmap v205: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:33.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:32 vm04.local ceph-mon[53345]: pgmap v205: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:35.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:35 vm05.local ceph-mon[57841]: pgmap v206: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:18:35.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:35 vm04.local ceph-mon[53345]: pgmap v206: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:18:37.024 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:18:37.231 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:18:37.231 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 61s ago 4m - -
2026-04-15T14:18:37.231 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 61s ago 4m 117M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:18:37.231 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 61s ago 4m 118M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:18:37.231 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (4m) 61s ago 4m 119M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:18:37.376 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:37 vm05.local ceph-mon[57841]: pgmap v207: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:18:37.376 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:18:37.493 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:18:37.493 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:18:37.493 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:18:37.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:37 vm04.local ceph-mon[53345]: pgmap v207: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:18:37.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:18:38.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:38 vm04.local ceph-mon[53345]: from='client.15298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:38.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:38 vm04.local ceph-mon[53345]: from='client.15302 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:38.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:38 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/209952315' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:38.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:18:38.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:18:38.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:38 vm05.local ceph-mon[57841]: from='client.15298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:38.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:38 vm05.local ceph-mon[57841]: from='client.15302 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:38.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:38 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/209952315' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:38.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:18:38.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:18:39.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:39 vm04.local ceph-mon[53345]: pgmap v208: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:18:39.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:18:39.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:18:39.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:18:39.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:18:39.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:39 vm04.local ceph-mon[53345]: pgmap v209: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 294 B/s rd, 588 B/s wr, 0 op/s
2026-04-15T14:18:39.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:39 vm04.local ceph-mon[53345]: pgmap v210: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 363 B/s rd, 727 B/s wr, 1 op/s
2026-04-15T14:18:39.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:18:39.723 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:18:39.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:39 vm05.local ceph-mon[57841]: pgmap v208: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:18:39.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:18:39.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:18:39.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:18:39.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:18:39.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:39 vm05.local ceph-mon[57841]: pgmap v209: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 294 B/s rd, 588 B/s wr, 0 op/s
2026-04-15T14:18:39.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:39 vm05.local ceph-mon[57841]: pgmap v210: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 363 B/s rd, 727 B/s wr, 1 op/s
2026-04-15T14:18:39.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:18:39.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:18:42.086 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:41 vm04.local ceph-mon[53345]: pgmap v211: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 363 B/s rd, 727 B/s wr, 1 op/s
2026-04-15T14:18:42.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:41 vm05.local ceph-mon[57841]: pgmap v211: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 363 B/s rd, 727 B/s wr, 1 op/s
2026-04-15T14:18:42.733 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:18:42.924 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:18:42.924 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 4s ago 4m - -
2026-04-15T14:18:42.924 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4s ago 4m 121M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:18:42.924 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 5s ago 4m 122M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:18:42.924 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (4m) 5s ago 4m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:18:43.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:42 vm05.local ceph-mon[57841]: from='client.15310 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:43.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:42 vm05.local ceph-mon[57841]: pgmap v212: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:43.196 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:18:43.197 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:18:43.197 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:18:43.197 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:42 vm04.local ceph-mon[53345]: from='client.15310 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:43.197 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:42 vm04.local ceph-mon[53345]: pgmap v212: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:44.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:43 vm04.local ceph-mon[53345]: from='client.24811 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:44.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:43 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2416957871' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:44.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:43 vm05.local ceph-mon[57841]: from='client.24811 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:44.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:43 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2416957871' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:45.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:18:45.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:44 vm04.local ceph-mon[53345]: pgmap v213: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:45.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:18:45.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:44 vm05.local ceph-mon[57841]: pgmap v213: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:48.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:47 vm04.local ceph-mon[53345]: pgmap v214: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:48.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:47 vm05.local ceph-mon[57841]: pgmap v214: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:48.440 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:18:48.632 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:18:48.632 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 10s ago 4m - -
2026-04-15T14:18:48.632 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 10s ago 4m 121M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:18:48.632 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 11s ago 4m 122M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:18:48.632 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (4m) 11s ago 4m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:18:48.888 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:18:48.888 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:18:48.888 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:18:49.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:48 vm05.local ceph-mon[57841]: from='client.15322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:49.205 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:48 vm05.local ceph-mon[57841]: from='client.15326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:49.205 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:48 vm05.local ceph-mon[57841]: pgmap v215: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:49.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:48 vm04.local ceph-mon[53345]: from='client.15322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:49.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:48 vm04.local ceph-mon[53345]: from='client.15326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:49.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:48 vm04.local ceph-mon[53345]: pgmap v215: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:50.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:49 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/25980198' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:50.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:49 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/25980198' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:51.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:50 vm05.local ceph-mon[57841]: pgmap v216: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:51.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:50 vm04.local ceph-mon[53345]: pgmap v216: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:54.110 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:18:54.111 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:53 vm04.local ceph-mon[53345]: pgmap v217: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:53 vm05.local ceph-mon[57841]: pgmap v217: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:54.306 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:18:54.306 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 15s ago 4m - -
2026-04-15T14:18:54.306 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 15s ago 5m 121M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:18:54.306 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 16s ago 4m 122M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:18:54.306 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 16s ago 5m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:18:54.550 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:18:54.550 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:18:54.550 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:18:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:54 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3819503743' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:54 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3819503743' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:18:56.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:55 vm04.local ceph-mon[53345]: from='client.15334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:56.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:55 vm04.local ceph-mon[53345]: from='client.15338 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:56.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:55 vm04.local ceph-mon[53345]: pgmap v218: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:55 vm05.local ceph-mon[57841]: from='client.15334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:55 vm05.local ceph-mon[57841]: from='client.15338 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:18:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:55 vm05.local ceph-mon[57841]: pgmap v218: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:57.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:56 vm04.local ceph-mon[53345]: pgmap v219: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:57.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:56 vm05.local ceph-mon[57841]: pgmap v219: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:18:59.763 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:18:59.952 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:18:59.952 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 21s ago 5m - -
2026-04-15T14:18:59.952 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 21s ago 5m 121M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:18:59.952 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 22s ago 5m 122M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:18:59.952 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 22s ago 5m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:19:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:59 vm04.local ceph-mon[53345]: pgmap v220: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:18:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:19:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:59 vm05.local ceph-mon[57841]: pgmap v220: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:18:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:19:00.203 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:19:00.203 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:19:00.203 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:19:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:00 vm04.local ceph-mon[53345]: from='client.24827 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:00 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2576647394' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:00 vm05.local ceph-mon[57841]: from='client.24827 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:00 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2576647394' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:01 vm04.local ceph-mon[53345]: from='client.15350 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:01 vm04.local ceph-mon[53345]: pgmap v221: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:01 vm05.local ceph-mon[57841]: from='client.15350 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:01 vm05.local ceph-mon[57841]: pgmap v221: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:03.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:02 vm04.local ceph-mon[53345]: pgmap v222: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:02 vm05.local ceph-mon[57841]: pgmap v222: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:05.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:04 vm05.local ceph-mon[57841]: pgmap v223: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:04 vm04.local ceph-mon[53345]: pgmap v223: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:05.419 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:19:05.601 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:19:05.601 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 27s ago 5m - -
2026-04-15T14:19:05.601 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 27s ago 5m 121M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:19:05.601 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 28s ago 5m 122M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:19:05.601 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 28s ago 5m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:19:05.844 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:19:05.844 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:19:05.844 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:19:06.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:05 vm04.local ceph-mon[53345]: from='client.15358 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:06.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:05 vm04.local ceph-mon[53345]: from='client.15362 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:06.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:05 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4188106664' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:06.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:05 vm05.local ceph-mon[57841]: from='client.15358 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:06.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:05 vm05.local ceph-mon[57841]: from='client.15362 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:06.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:05 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4188106664' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:07.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:06 vm05.local ceph-mon[57841]: pgmap v224: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:07.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:06 vm04.local ceph-mon[53345]: pgmap v224: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:10.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:09 vm04.local ceph-mon[53345]: pgmap v225: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:10.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:09 vm05.local ceph-mon[57841]: pgmap v225: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:11.058 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to stop
2026-04-15T14:19:11.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:10 vm05.local ceph-mon[57841]: pgmap v226: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:11.241 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:10 vm04.local ceph-mon[53345]: pgmap v226: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:11.242 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:19:11.242 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 32s ago 5m - -
2026-04-15T14:19:11.242 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 32s ago 5m 121M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:19:11.242 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 33s ago 5m 122M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:19:11.242 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 33s ago 5m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:19:11.469 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:19:11.469 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:19:11.469 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:19:12.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:11 vm05.local ceph-mon[57841]: from='client.15370 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:12.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:11 vm05.local ceph-mon[57841]: from='client.15374 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:12.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:11 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4188782995' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:12.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:11 vm04.local ceph-mon[53345]: from='client.15370 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:12.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:11 vm04.local ceph-mon[53345]: from='client.15374 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:12.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:11 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4188782995' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:13.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:12 vm05.local ceph-mon[57841]: pgmap v227: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:13.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:12 vm04.local ceph-mon[53345]: pgmap v227: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:14.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:19:14.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:19:15.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:15 vm05.local ceph-mon[57841]: pgmap v228: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:15.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:15 vm04.local ceph-mon[53345]: pgmap v228: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:19:15.974 INFO:teuthology.orchestra.run.vm04.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T14:19:15.974 INFO:teuthology.orchestra.run.vm04.stderr: Dload Upload Total Spent Left Speed
2026-04-15T14:19:15.975 INFO:teuthology.orchestra.run.vm04.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T14:19:16.259 INFO:teuthology.orchestra.run.vm04.stdout:anonymousScheduled to start rgw.foo.vm04.lqvrsn on host 'vm04'
2026-04-15T14:19:16.504 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.lqvrsn to start
2026-04-15T14:19:16.712 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:19:16.712 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 error 38s ago 5m - -
2026-04-15T14:19:16.712 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 38s ago 5m 121M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:19:16.712 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 39s ago 5m 122M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:19:16.712 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 39s ago 5m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:19:16.971 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:19:16.972 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:19:16.972 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.lqvrsn on vm04 is in error state
2026-04-15T14:19:17.202 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:17 vm04.local ceph-mon[53345]: from='client.15382 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.lqvrsn", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:17.202 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:17 vm04.local ceph-mon[53345]: Schedule start daemon rgw.foo.vm04.lqvrsn
2026-04-15T14:19:17.202 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:17 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:17.202 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:17 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:17.202 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:17 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:19:17.202 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:17 vm04.local ceph-mon[53345]: from='client.15386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:17.202 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:17 vm04.local ceph-mon[53345]: from='client.15390 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:17.202 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:17 vm04.local ceph-mon[53345]: pgmap v229: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:17.202 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:17 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2707990332' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
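The restart just dispatched is asynchronous: the audit entry cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.lqvrsn", ...}] is the wire form of the CLI call sketched below, the mon merely records "Schedule start daemon", and cephadm redeploys the container in the background, so the next poll can still show the daemon in "error".

    ceph orch daemon start rgw.foo.vm04.lqvrsn   # mgr schedules the start; STATUS flips to running once cephadm applies it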
2026-04-15T14:19:17.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:17 vm05.local ceph-mon[57841]: from='client.15382 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.lqvrsn", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:17.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:17 vm05.local ceph-mon[57841]: Schedule start daemon rgw.foo.vm04.lqvrsn
2026-04-15T14:19:17.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:17 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:17.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:17 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:17.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:17 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:19:17.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:17 vm05.local ceph-mon[57841]: from='client.15386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:17.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:17 vm05.local ceph-mon[57841]: from='client.15390 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:17.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:17 vm05.local ceph-mon[57841]: pgmap v229: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:17.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:17 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2707990332' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:18.566 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:18 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:18.566 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:18 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:18.566 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:18 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:18.566 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:18 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:18.566 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:18 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:19:18.566 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:18 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:19:18.566 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:18 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:18.566 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:18 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:19:18.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:18 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:18.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:18 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:18.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:18 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:18.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:18 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:18.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:18 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:19:18.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:18 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:19:18.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:18 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:18.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:18 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:19:19.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:19 vm05.local ceph-mon[57841]: pgmap v230: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:19.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:19 vm05.local ceph-mon[57841]: pgmap v231: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:19.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:19 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:19.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:19 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:19.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:19 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:19:19.964 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:19 vm04.local ceph-mon[53345]: pgmap v230: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:19.964 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:19 vm04.local ceph-mon[53345]: pgmap v231: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:19.964 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:19 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:19.964 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:19 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:19.964 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:19 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:19:21.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:20 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:21.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:20 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:21.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:20 vm04.local ceph-mon[53345]: pgmap v232: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 8.6 KiB/s rd, 0 B/s wr, 13 op/s
2026-04-15T14:19:21.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:20 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:21.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:20 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:21.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:20 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:19:21.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:20 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:19:21.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:20 vm04.local ceph-mon[53345]: pgmap v233: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 50 op/s
2026-04-15T14:19:21.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:20 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:21.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:20 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:19:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:20 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:20 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:20 vm05.local ceph-mon[57841]: pgmap v232: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 8.6 KiB/s rd, 0 B/s wr, 13 op/s
2026-04-15T14:19:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:20 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:20 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:20 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:19:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:20 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:19:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:20 vm05.local ceph-mon[57841]: pgmap v233: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 50 op/s
2026-04-15T14:19:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:20 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:20 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:19:22.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:21 vm04.local ceph-mon[53345]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T14:19:22.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:21 vm04.local ceph-mon[53345]: Cluster is now healthy
2026-04-15T14:19:22.182 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3s) 1s ago 5m 99.2M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:19:22.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:21 vm05.local ceph-mon[57841]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T14:19:22.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:21 vm05.local ceph-mon[57841]: Cluster is now healthy
2026-04-15T14:19:22.392 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled to stop rgw.foo.vm04.owsxoy on host 'vm04'
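With CEPHADM_FAILED_DAEMON cleared and the cluster back to HEALTH_OK, the loop moves on to the next rgw daemon in turn. The stop takes the same mgr-scheduled path as the start; in CLI form (a sketch matching the cmd=[{"prefix": "orch daemon", "action": "stop", ...}] audit entries below):

    ceph orch daemon stop rgw.foo.vm04.owsxoy   # "Schedule stop daemon rgw.foo.vm04.owsxoy" then appears in both mon logs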
INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:19:22.806 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:19:22.806 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4s) 2s ago 5m 99.2M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:19:22.806 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 2s ago 5m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2 2026-04-15T14:19:22.807 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 2s ago 5m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:19:22.807 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 2s ago 5m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:19:23.055 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-04-15T14:19:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:23 vm04.local ceph-mon[53345]: from='client.15410 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:19:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:23 vm04.local ceph-mon[53345]: from='client.15414 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.owsxoy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:19:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:23 vm04.local ceph-mon[53345]: Schedule stop daemon rgw.foo.vm04.owsxoy 2026-04-15T14:19:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:19:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:19:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:19:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:23 vm04.local ceph-mon[53345]: from='client.15418 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:19:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:23 vm04.local ceph-mon[53345]: pgmap v234: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 54 KiB/s rd, 0 B/s wr, 88 op/s 2026-04-15T14:19:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:23 vm04.local ceph-mon[53345]: from='client.15422 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:19:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:23 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/1262116284' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:19:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:23 vm05.local ceph-mon[57841]: from='client.15410 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:19:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:23 vm05.local ceph-mon[57841]: from='client.15414 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.owsxoy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:19:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:23 vm05.local ceph-mon[57841]: Schedule stop daemon rgw.foo.vm04.owsxoy 2026-04-15T14:19:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:19:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:19:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:19:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:23 vm05.local ceph-mon[57841]: from='client.15418 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:19:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:23 vm05.local ceph-mon[57841]: pgmap v234: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 54 KiB/s rd, 0 B/s wr, 88 op/s 2026-04-15T14:19:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:23 vm05.local ceph-mon[57841]: from='client.15422 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:19:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:23 vm05.local ceph-mon[57841]: from='client.? 
2026-04-15T14:19:25.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:25.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:25.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:19:25.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:19:25.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:25.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:19:25.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:25 vm04.local ceph-mon[53345]: pgmap v235: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 101 KiB/s rd, 0 B/s wr, 164 op/s
2026-04-15T14:19:25.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:25.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:25.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:19:25.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:19:25.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:25.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:19:25.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:25 vm05.local ceph-mon[57841]: pgmap v235: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 101 KiB/s rd, 0 B/s wr, 164 op/s
2026-04-15T14:19:27.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:26 vm05.local ceph-mon[57841]: pgmap v236: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 0 B/s wr, 152 op/s
2026-04-15T14:19:27.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:26 vm04.local ceph-mon[53345]: pgmap v236: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 0 B/s wr, 152 op/s
2026-04-15T14:19:28.278 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:19:28.467 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:19:28.467 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9s) 4s ago 5m 99.5M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:19:28.467 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 4s ago 5m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:19:28.467 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 8s ago 5m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:19:28.467 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 8s ago 5m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:19:28.709 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:19:29.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:28 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4167194124' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:29.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:28 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4167194124' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:30.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:29 vm04.local ceph-mon[53345]: from='client.15430 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:30.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:29 vm04.local ceph-mon[53345]: from='client.15434 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:30.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:29 vm04.local ceph-mon[53345]: pgmap v237: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 75 KiB/s rd, 193 B/s wr, 123 op/s
2026-04-15T14:19:30.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:30.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:19:30.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:29 vm05.local ceph-mon[57841]: from='client.15430 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:29 vm05.local ceph-mon[57841]: from='client.15434 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:29 vm05.local ceph-mon[57841]: pgmap v237: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 75 KiB/s rd, 193 B/s wr, 123 op/s
2026-04-15T14:19:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:19:32.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:31 vm04.local ceph-mon[53345]: pgmap v238: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 193 B/s wr, 111 op/s
2026-04-15T14:19:32.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:31 vm05.local ceph-mon[57841]: pgmap v238: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 193 B/s wr, 111 op/s
2026-04-15T14:19:33.920 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:19:34.097 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:33 vm04.local ceph-mon[53345]: pgmap v239: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 170 B/s wr, 75 op/s
2026-04-15T14:19:34.098 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:19:34.098 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (15s) 9s ago 5m 99.5M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:19:34.098 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 9s ago 5m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb ec1c08c5cfc2
2026-04-15T14:19:34.098 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 14s ago 5m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:19:34.098 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 14s ago 5m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:19:34.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:33 vm05.local ceph-mon[57841]: pgmap v239: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 170 B/s wr, 75 op/s
2026-04-15T14:19:34.333 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:19:35.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:34 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4102386860' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:35.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:35.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:35.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:34 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:19:35.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:34 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4102386860' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:35.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:35.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:35.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:34 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:19:35.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:35 vm04.local ceph-mon[53345]: from='client.15442 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:35.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:35 vm04.local ceph-mon[53345]: from='client.15446 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:35.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:35 vm04.local ceph-mon[53345]: pgmap v240: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 511 B/s wr, 50 op/s
2026-04-15T14:19:36.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:35 vm05.local ceph-mon[57841]: from='client.15442 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:36.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:35 vm05.local ceph-mon[57841]: from='client.15446 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:36.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:35 vm05.local ceph-mon[57841]: pgmap v240: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 511 B/s wr, 50 op/s
2026-04-15T14:19:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:19:37.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:19:37.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:37 vm05.local ceph-mon[57841]: pgmap v241: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 267 B/s rd, 534 B/s wr, 0 op/s
2026-04-15T14:19:37.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:37.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:37 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:19:37.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:19:37.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:19:37.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:19:37.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:19:37.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:37 vm04.local ceph-mon[53345]: pgmap v241: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 267 B/s rd, 534 B/s wr, 0 op/s 2026-04-15T14:19:37.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:19:37.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:37 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:19:38.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:38 vm05.local ceph-mon[57841]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-04-15T14:19:38.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:38 vm04.local ceph-mon[53345]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-04-15T14:19:39.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:39 vm05.local ceph-mon[57841]: pgmap v242: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 267 B/s rd, 534 B/s wr, 0 op/s 2026-04-15T14:19:39.532 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:39 vm04.local ceph-mon[53345]: pgmap v242: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 267 B/s rd, 534 B/s wr, 0 op/s 2026-04-15T14:19:39.532 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:19:39.705 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:19:39.705 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (21s) 3s ago 5m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:19:39.705 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3s ago 5m - - 2026-04-15T14:19:39.705 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 19s ago 5m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:19:39.705 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 19s ago 5m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:19:39.929 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:19:39.929 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed 
2026-04-15T14:19:39.929 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:19:40.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:40 vm04.local ceph-mon[53345]: from='client.15450 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:40.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:40 vm04.local ceph-mon[53345]: from='client.15454 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:40.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:40 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2272652688' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:40.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:40 vm05.local ceph-mon[57841]: from='client.15450 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:40.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:40 vm05.local ceph-mon[57841]: from='client.15454 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:40.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:40 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2272652688' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:41.518 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:41 vm04.local ceph-mon[53345]: pgmap v243: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 178 B/s rd, 356 B/s wr, 0 op/s
2026-04-15T14:19:41.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:41 vm05.local ceph-mon[57841]: pgmap v243: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 178 B/s rd, 356 B/s wr, 0 op/s
2026-04-15T14:19:43.495 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:43 vm05.local ceph-mon[57841]: pgmap v244: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 178 B/s rd, 356 B/s wr, 0 op/s
2026-04-15T14:19:43.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:43 vm04.local ceph-mon[53345]: pgmap v244: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 178 B/s rd, 356 B/s wr, 0 op/s
2026-04-15T14:19:45.132 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:19:45.311 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:19:45.311 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (26s) 9s ago 5m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:19:45.311 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 9s ago 5m - -
2026-04-15T14:19:45.311 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 25s ago 5m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:19:45.311 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 25s ago 5m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:19:45.543 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:19:45.543 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:19:45.543 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:19:45.543 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:45 vm04.local ceph-mon[53345]: pgmap v245: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 178 B/s rd, 356 B/s wr, 0 op/s
2026-04-15T14:19:45.543 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:45 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:45.543 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:45 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:19:45.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:45 vm05.local ceph-mon[57841]: pgmap v245: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 178 B/s rd, 356 B/s wr, 0 op/s
2026-04-15T14:19:45.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:45 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:19:45.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:45 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:19:46.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:46 vm04.local ceph-mon[53345]: from='client.15462 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:46.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:46 vm04.local ceph-mon[53345]: from='client.15466 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:46.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:46 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1301819639' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:46.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:46 vm05.local ceph-mon[57841]: from='client.15462 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:46.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:46 vm05.local ceph-mon[57841]: from='client.15466 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:46.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:46 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1301819639' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:47.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:47 vm04.local ceph-mon[53345]: pgmap v246: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:47.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:47 vm05.local ceph-mon[57841]: pgmap v246: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:49.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:49 vm04.local ceph-mon[53345]: pgmap v247: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:49.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:49 vm05.local ceph-mon[57841]: pgmap v247: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:50.759 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:19:50.948 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:19:50.948 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (32s) 14s ago 5m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:19:50.949 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 14s ago 5m - -
2026-04-15T14:19:50.949 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 31s ago 5m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:19:50.949 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5m) 31s ago 5m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:19:51.184 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:19:51.184 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:19:51.184 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:19:51.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:51 vm04.local ceph-mon[53345]: pgmap v248: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:51.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:51 vm04.local ceph-mon[53345]: from='client.15474 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:51.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:51 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2192943715' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:51.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:51 vm05.local ceph-mon[57841]: pgmap v248: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:51.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:51 vm05.local ceph-mon[57841]: from='client.15474 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:51.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:51 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2192943715' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:52.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:52 vm04.local ceph-mon[53345]: from='client.15478 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:52.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:52 vm05.local ceph-mon[57841]: from='client.15478 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:53.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:53 vm05.local ceph-mon[57841]: pgmap v249: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:53.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:53 vm04.local ceph-mon[53345]: pgmap v249: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:19:55.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:55 vm05.local ceph-mon[57841]: pgmap v250: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:19:55.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:55 vm04.local ceph-mon[53345]: pgmap v250: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:19:56.398 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:19:56.579 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:19:56.579 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (38s) 20s ago 6m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:19:56.579 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 20s ago 6m - -
2026-04-15T14:19:56.579 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 36s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:19:56.579 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (6m) 36s ago 6m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:19:56.821 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:19:56.821 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:19:56.821 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:19:57.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:57 vm05.local ceph-mon[57841]: pgmap v251: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:19:57.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:57 vm05.local ceph-mon[57841]: from='client.15486 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:57.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:57 vm05.local ceph-mon[57841]: from='client.15490 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:57.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:57 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/193721494' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:57.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:57 vm04.local ceph-mon[53345]: pgmap v251: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:19:57.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:57 vm04.local ceph-mon[53345]: from='client.15486 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:57.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:57 vm04.local ceph-mon[53345]: from='client.15490 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:19:57.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:57 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/193721494' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:19:59.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:59 vm05.local ceph-mon[57841]: pgmap v252: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:19:59.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:19:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:19:59.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:59 vm04.local ceph-mon[53345]: pgmap v252: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:19:59.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:19:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:20:00.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:00 vm05.local ceph-mon[57841]: Health detail: HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:00.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:00 vm05.local ceph-mon[57841]: [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:00.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:00 vm05.local ceph-mon[57841]: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:00.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:00 vm04.local ceph-mon[53345]: Health detail: HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:00.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:00 vm04.local ceph-mon[53345]: [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:00.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:00 vm04.local ceph-mon[53345]: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:01.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:01 vm05.local ceph-mon[57841]: pgmap v253: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:01.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:01 vm04.local ceph-mon[53345]: pgmap v253: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:02.036 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:02.237 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:02.237 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (43s) 26s ago 6m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:02.237 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 26s ago 6m - -
2026-04-15T14:20:02.237 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 42s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:02.237 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (6m) 42s ago 6m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:02.482 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:02.482 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:02.482 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:03.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:03 vm05.local ceph-mon[57841]: from='client.15498 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:03.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:03 vm05.local ceph-mon[57841]: pgmap v254: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:03.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:03 vm05.local ceph-mon[57841]: from='client.15502 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:03.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:03 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1349174381' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:03.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:03 vm04.local ceph-mon[53345]: from='client.15498 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:03.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:03 vm04.local ceph-mon[53345]: pgmap v254: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:03.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:03 vm04.local ceph-mon[53345]: from='client.15502 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:03.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:03 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1349174381' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:05.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:05 vm04.local ceph-mon[53345]: pgmap v255: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:20:05.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:05 vm05.local ceph-mon[57841]: pgmap v255: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:20:07.697 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:07.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:07 vm04.local ceph-mon[53345]: pgmap v256: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:20:07.886 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:07.886 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (49s) 31s ago 6m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:07.886 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 31s ago 6m - -
2026-04-15T14:20:07.886 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 48s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:07.886 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (6m) 48s ago 6m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:07.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:07 vm05.local ceph-mon[57841]: pgmap v256: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:20:08.129 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:08.129 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:08.129 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:08.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:08 vm04.local ceph-mon[53345]: from='client.15510 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:08.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:08 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/831255755' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:08 vm05.local ceph-mon[57841]: from='client.15510 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:08 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/831255755' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:09.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:09 vm04.local ceph-mon[53345]: from='client.15514 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:09.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:09 vm04.local ceph-mon[53345]: pgmap v257: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:20:09.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:09 vm05.local ceph-mon[57841]: from='client.15514 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:09.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:09 vm05.local ceph-mon[57841]: pgmap v257: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:20:11.816 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:11 vm04.local ceph-mon[53345]: pgmap v258: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:20:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:11 vm05.local ceph-mon[57841]: pgmap v258: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:20:13.351 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:13.541 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:13.541 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (54s) 37s ago 6m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:13.542 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 37s ago 6m - -
2026-04-15T14:20:13.542 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 53s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:13.542 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (6m) 53s ago 6m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:13.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:13 vm04.local ceph-mon[53345]: pgmap v259: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:20:13.744 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:13 vm05.local ceph-mon[57841]: pgmap v259: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:20:13.781 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:13.781 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:13.781 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:14.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:14 vm04.local ceph-mon[53345]: from='client.15522 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:14.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:14 vm04.local ceph-mon[53345]: from='client.15526 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:14.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:14 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4217675246' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:14.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:20:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:14 vm05.local ceph-mon[57841]: from='client.15522 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:14 vm05.local ceph-mon[57841]: from='client.15526 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:14 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4217675246' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:20:15.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:15 vm04.local ceph-mon[53345]: pgmap v260: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:20:15.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:15 vm05.local ceph-mon[57841]: pgmap v260: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:20:17.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:17 vm04.local ceph-mon[53345]: pgmap v261: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:17.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:17 vm05.local ceph-mon[57841]: pgmap v261: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:19.001 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:19.181 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:19.181 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (60s) 43s ago 6m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:19.181 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 43s ago 6m - -
2026-04-15T14:20:19.181 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 59s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:19.181 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (6m) 59s ago 6m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:19.415 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:19.415 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:19.415 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:19.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:19 vm04.local ceph-mon[53345]: pgmap v262: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:19.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:19 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/389146284' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:19.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:19 vm05.local ceph-mon[57841]: pgmap v262: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:19.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:19 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/389146284' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:20.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:20 vm04.local ceph-mon[53345]: from='client.15534 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:20.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:20 vm04.local ceph-mon[53345]: from='client.15538 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:20 vm05.local ceph-mon[57841]: from='client.15534 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:20 vm05.local ceph-mon[57841]: from='client.15538 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:21.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:21 vm04.local ceph-mon[53345]: pgmap v263: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:21.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:21 vm05.local ceph-mon[57841]: pgmap v263: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:23.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:23 vm04.local ceph-mon[53345]: pgmap v264: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:23.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:23 vm05.local ceph-mon[57841]: pgmap v264: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:24.633 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:24.811 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:24.811 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (66s) 48s ago 6m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:24.811 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 48s ago 6m - -
2026-04-15T14:20:24.811 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 64s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:24.811 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (6m) 64s ago 6m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:25.042 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:25.042 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:25.042 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:25.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:25 vm04.local ceph-mon[53345]: pgmap v265: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:25.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:25 vm04.local ceph-mon[53345]: from='client.15546 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:25.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:25 vm04.local ceph-mon[53345]: from='client.15550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:25.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:25 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/151313807' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:25.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:25 vm05.local ceph-mon[57841]: pgmap v265: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:25.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:25 vm05.local ceph-mon[57841]: from='client.15546 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:25.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:25 vm05.local ceph-mon[57841]: from='client.15550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:25.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:25 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/151313807' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:27.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:27 vm04.local ceph-mon[53345]: pgmap v266: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:27.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:27 vm05.local ceph-mon[57841]: pgmap v266: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:29.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:29 vm04.local ceph-mon[53345]: pgmap v267: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:29.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:20:29.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:29 vm05.local ceph-mon[57841]: pgmap v267: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:29.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:20:30.252 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:30.435 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:30.435 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (71s) 54s ago 6m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:30.435 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 54s ago 6m - -
2026-04-15T14:20:30.435 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 70s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:30.435 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (6m) 70s ago 6m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:30.656 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:30.656 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:30.656 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:31.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:31 vm04.local ceph-mon[53345]: pgmap v268: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:31.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:31 vm04.local ceph-mon[53345]: from='client.15558 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:31.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:31 vm04.local ceph-mon[53345]: from='client.15562 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:31.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:31 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/124748755' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:31.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:31 vm05.local ceph-mon[57841]: pgmap v268: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:31.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:31 vm05.local ceph-mon[57841]: from='client.15558 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:31.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:31 vm05.local ceph-mon[57841]: from='client.15562 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:31.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:31 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/124748755' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:33.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:33 vm04.local ceph-mon[53345]: pgmap v269: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:33.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:33 vm05.local ceph-mon[57841]: pgmap v269: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:35.860 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:35.860 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:35 vm04.local ceph-mon[53345]: pgmap v270: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:20:35.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:35 vm05.local ceph-mon[57841]: pgmap v270: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:20:36.032 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:36.032 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (77s) 59s ago 6m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:36.032 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 59s ago 6m - -
2026-04-15T14:20:36.032 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 76s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:36.032 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (6m) 76s ago 6m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:36.253 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:36.253 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:36.253 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:36.769 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:36 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:20:36.769 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:36 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/899688174' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:36 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/899688174' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:20:36.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:36 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:20:36.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:36 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/899688174' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:20:37.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:37 vm04.local ceph-mon[53345]: from='client.15570 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:20:37.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:37 vm04.local ceph-mon[53345]: from='client.15574 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:20:37.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:37 vm04.local ceph-mon[53345]: pgmap v271: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:20:37.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:37 vm05.local ceph-mon[57841]: from='client.15570 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:20:37.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:37 vm05.local ceph-mon[57841]: from='client.15574 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:20:37.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:37 vm05.local ceph-mon[57841]: pgmap v271: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:20:39.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:20:39.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:20:39.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:20:39.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:20:39.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:20:39.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:20:39.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 
2026-04-15T14:20:39.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:20:39.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:20:39.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:20:39.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:20:39.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:20:40.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:39 vm04.local ceph-mon[53345]: pgmap v272: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 349 B/s wr, 0 op/s
2026-04-15T14:20:40.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:39 vm04.local ceph-mon[53345]: pgmap v273: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 420 B/s wr, 0 op/s
2026-04-15T14:20:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:39 vm05.local ceph-mon[57841]: pgmap v272: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 349 B/s wr, 0 op/s
2026-04-15T14:20:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:39 vm05.local ceph-mon[57841]: pgmap v273: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 420 B/s wr, 0 op/s
2026-04-15T14:20:41.453 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:41.625 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:41.625 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (83s) 3s ago 6m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:41.625 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3s ago 6m - -
2026-04-15T14:20:41.625 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 81s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:41.625 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (6m) 81s ago 6m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:41.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:41 vm04.local ceph-mon[53345]: pgmap v274: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 420 B/s wr, 0 op/s
2026-04-15T14:20:41.849 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:41.849 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:41.849 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:42.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:41 vm05.local ceph-mon[57841]: pgmap v274: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 420 B/s wr, 0 op/s
2026-04-15T14:20:43.093 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:42 vm05.local ceph-mon[57841]: from='client.15582 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:43.093 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:42 vm05.local ceph-mon[57841]: from='client.15586 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:43.093 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:42 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1597458928' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:43.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:42 vm04.local ceph-mon[53345]: from='client.15582 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:43.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:42 vm04.local ceph-mon[53345]: from='client.15586 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:43.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:42 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1597458928' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:44.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:43 vm04.local ceph-mon[53345]: pgmap v275: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 420 B/s wr, 0 op/s
2026-04-15T14:20:44.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:43 vm05.local ceph-mon[57841]: pgmap v275: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 420 B/s wr, 0 op/s
2026-04-15T14:20:45.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:20:45.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:20:46.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:45 vm04.local ceph-mon[53345]: pgmap v276: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:46.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:45 vm05.local ceph-mon[57841]: pgmap v276: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:47.060 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:47.256 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:47.256 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (88s) 9s ago 6m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:47.256 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 9s ago 6m - -
2026-04-15T14:20:47.256 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 87s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:47.256 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (6m) 87s ago 6m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:47.483 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:47.484 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:47.484 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:47.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:47 vm04.local ceph-mon[53345]: pgmap v277: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:47.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:47 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1319450321' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:48.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:47 vm05.local ceph-mon[57841]: pgmap v277: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:48.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:47 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1319450321' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:49.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:48 vm04.local ceph-mon[53345]: from='client.15594 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:49.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:48 vm04.local ceph-mon[53345]: from='client.15598 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:49.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:48 vm05.local ceph-mon[57841]: from='client.15594 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:49.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:48 vm05.local ceph-mon[57841]: from='client.15598 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:50.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:49 vm04.local ceph-mon[53345]: pgmap v278: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:50.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:49 vm05.local ceph-mon[57841]: pgmap v278: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:52.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:51 vm04.local ceph-mon[53345]: pgmap v279: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:52.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:51 vm05.local ceph-mon[57841]: pgmap v279: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:52.681 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:52.856 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:52.856 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (94s) 15s ago 6m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:52.856 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 15s ago 6m - -
2026-04-15T14:20:52.856 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (6m) 93s ago 6m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:52.856 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 93s ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:53.088 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:53.088 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:53.088 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:54.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:53 vm04.local ceph-mon[53345]: pgmap v280: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:54.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:53 vm04.local ceph-mon[53345]: from='client.15606 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:54.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:53 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4282366263' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:54.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:53 vm05.local ceph-mon[57841]: pgmap v280: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:20:54.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:53 vm05.local ceph-mon[57841]: from='client.15606 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:53 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4282366263' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:55.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:54 vm04.local ceph-mon[53345]: from='client.15610 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:55.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:54 vm05.local ceph-mon[57841]: from='client.15610 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:20:56.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:55 vm04.local ceph-mon[53345]: pgmap v281: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:56.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:55 vm05.local ceph-mon[57841]: pgmap v281: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:58.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:57 vm04.local ceph-mon[53345]: pgmap v282: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:58.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:57 vm05.local ceph-mon[57841]: pgmap v282: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:20:58.288 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:20:58.470 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:20:58.470 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (99s) 20s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:20:58.470 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 20s ago 7m - -
2026-04-15T14:20:58.470 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (7m) 98s ago 7m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:20:58.470 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 98s ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:20:58.695 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:20:58.695 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:20:58.695 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:20:59.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:58 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1453045233' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:20:59.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:58 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1453045233' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:00.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:59 vm04.local ceph-mon[53345]: pgmap v283: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:00.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:59 vm04.local ceph-mon[53345]: from='client.15618 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:00.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:59 vm04.local ceph-mon[53345]: from='client.15622 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:00.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:20:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:21:00.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:59 vm05.local ceph-mon[57841]: pgmap v283: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:00.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:59 vm05.local ceph-mon[57841]: from='client.15618 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:00.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:59 vm05.local ceph-mon[57841]: from='client.15622 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:00.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:20:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:21:02.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:01 vm04.local ceph-mon[53345]: pgmap v284: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:02.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:01 vm05.local ceph-mon[57841]: pgmap v284: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:03.912 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:21:04.102 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:21:04.102 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (105s) 26s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:21:04.102 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 26s ago 7m - -
2026-04-15T14:21:04.102 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (7m) 104s ago 7m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:21:04.102 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 104s ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:21:04.102 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:03 vm04.local ceph-mon[53345]: pgmap v285: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:04.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:03 vm05.local ceph-mon[57841]: pgmap v285: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:04.334 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:21:04.334 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:21:04.334 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:21:05.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:04 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/961374346' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:05.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:04 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/961374346' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:06.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:05 vm04.local ceph-mon[53345]: from='client.15630 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:06.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:05 vm04.local ceph-mon[53345]: pgmap v286: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:21:06.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:05 vm04.local ceph-mon[53345]: from='client.15634 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:06.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:05 vm05.local ceph-mon[57841]: from='client.15630 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:06.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:05 vm05.local ceph-mon[57841]: pgmap v286: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:21:06.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:05 vm05.local ceph-mon[57841]: from='client.15634 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:08.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:07 vm04.local ceph-mon[53345]: pgmap v287: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:08.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:07 vm05.local ceph-mon[57841]: pgmap v287: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:09.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:08 vm05.local ceph-mon[57841]: pgmap v288: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:09.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:08 vm04.local ceph-mon[53345]: pgmap v288: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:09.536 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:21:09.722 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:21:09.722 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (111s) 32s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:21:09.722 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 32s ago 7m - -
2026-04-15T14:21:09.722 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (7m) 109s ago 7m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:21:09.722 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 109s ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:21:09.959 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:21:09.960 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:21:09.960 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:21:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:09 vm05.local ceph-mon[57841]: from='client.15642 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:09 vm05.local ceph-mon[57841]: from='client.15646 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:10.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:09 vm04.local ceph-mon[53345]: from='client.15642 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:10.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:09 vm04.local ceph-mon[53345]: from='client.15646 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:11.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:10 vm05.local ceph-mon[57841]: pgmap v289: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:11.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:10 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/646439356' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:11.267 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:10 vm04.local ceph-mon[53345]: pgmap v289: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:11.267 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:10 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/646439356' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:13.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:12 vm04.local ceph-mon[53345]: pgmap v290: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:13.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:12 vm05.local ceph-mon[57841]: pgmap v290: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:15.183 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:21:15.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:14 vm04.local ceph-mon[53345]: pgmap v291: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:15.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:21:15.368 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:21:15.368 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (116s) 37s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:21:15.368 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 37s ago 7m - -
2026-04-15T14:21:15.368 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (7m) 115s ago 7m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:21:15.368 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 115s ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:21:15.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:14 vm05.local ceph-mon[57841]: pgmap v291: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:15.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:21:15.601 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:21:15.601 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:21:15.601 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:21:16.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:16 vm04.local ceph-mon[53345]: from='client.25053 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:16.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:16 vm04.local ceph-mon[53345]: from='client.15658 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:16.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:16 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2778773221' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:16.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:16 vm05.local ceph-mon[57841]: from='client.25053 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:16.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:16 vm05.local ceph-mon[57841]: from='client.15658 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:16.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:16 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2778773221' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:17.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:17 vm04.local ceph-mon[53345]: pgmap v292: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:17.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:17 vm05.local ceph-mon[57841]: pgmap v292: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:19.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:19 vm04.local ceph-mon[53345]: pgmap v293: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:19.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:19 vm05.local ceph-mon[57841]: pgmap v293: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:20.794 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:21:20.963 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:21:20.963 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 43s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:21:20.963 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 43s ago 7m - -
2026-04-15T14:21:20.963 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (7m) 2m ago 7m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:21:20.963 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 2m ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:21:21.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:21 vm04.local ceph-mon[53345]: pgmap v294: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:21.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:21 vm04.local ceph-mon[53345]: from='client.15666 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:21.182 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:21:21.182 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:21:21.182 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:21:21.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:21 vm05.local ceph-mon[57841]: pgmap v294: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:21.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:21 vm05.local ceph-mon[57841]: from='client.15666 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:22.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:22 vm04.local ceph-mon[53345]: from='client.15670 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:22.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:22 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2010505224' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:22.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:22 vm05.local ceph-mon[57841]: from='client.15670 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:22.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:22 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2010505224' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:23.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:23 vm04.local ceph-mon[53345]: pgmap v295: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:23.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:23 vm05.local ceph-mon[57841]: pgmap v295: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:25.265 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:25 vm05.local ceph-mon[57841]: pgmap v296: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:25.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:25 vm04.local ceph-mon[53345]: pgmap v296: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:26.399 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:21:26.594 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:21:26.594 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 48s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:21:26.594 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 48s ago 7m - -
2026-04-15T14:21:26.594 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (7m) 2m ago 7m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:21:26.594 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 2m ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:21:26.835 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:21:26.836 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:21:26.836 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:21:27.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:27 vm04.local ceph-mon[53345]: pgmap v297: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:27.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:27 vm04.local ceph-mon[53345]: from='client.15676 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:27.115 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:27 vm04.local ceph-mon[53345]: from='client.15680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:27.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:27 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/435413806' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:27.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:27 vm05.local ceph-mon[57841]: pgmap v297: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:27.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:27 vm05.local ceph-mon[57841]: from='client.15676 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:27.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:27 vm05.local ceph-mon[57841]: from='client.15680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:27.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:27 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/435413806' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:29.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:29 vm04.local ceph-mon[53345]: pgmap v298: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:29.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:29 vm05.local ceph-mon[57841]: pgmap v298: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:30.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:30 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:21:30.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:30 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:21:31.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:31 vm04.local ceph-mon[53345]: pgmap v299: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:31.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:31 vm05.local ceph-mon[57841]: pgmap v299: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:32.041 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:21:32.221 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:21:32.221 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 54s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:21:32.221 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 54s ago 7m - -
2026-04-15T14:21:32.221 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (7m) 2m ago 7m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:21:32.221 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 2m ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:21:32.447 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:21:32.448 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:21:32.448 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:21:33.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:33 vm04.local ceph-mon[53345]: pgmap v300: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:33.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:33 vm04.local ceph-mon[53345]: from='client.15688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:33.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:33 vm04.local ceph-mon[53345]: from='client.15692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:33.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:33 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2405508222' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:33.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:33 vm05.local ceph-mon[57841]: pgmap v300: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:33.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:33 vm05.local ceph-mon[57841]: from='client.15688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:33.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:33 vm05.local ceph-mon[57841]: from='client.15692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:33.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:33 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2405508222' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:35.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:35 vm04.local ceph-mon[53345]: pgmap v301: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:21:35.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:35 vm05.local ceph-mon[57841]: pgmap v301: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:21:37.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:37 vm04.local ceph-mon[53345]: pgmap v302: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:37.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:37 vm05.local ceph-mon[57841]: pgmap v302: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:37.643 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:21:37.821 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:21:37.821 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 60s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:21:37.821 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 60s ago 7m - -
2026-04-15T14:21:37.821 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (7m) 2m ago 7m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:21:37.821 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 2m ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:21:38.046 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:21:38.046 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:21:38.047 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:21:38.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:38 vm04.local ceph-mon[53345]: from='client.15700 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:38.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:38 vm04.local ceph-mon[53345]: from='client.15704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:38.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:21:38.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:38 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3269420931' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:38.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:38 vm05.local ceph-mon[57841]: from='client.15700 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:38.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:38 vm05.local ceph-mon[57841]: from='client.15704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:38.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:21:38.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:38 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3269420931' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:39.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:39 vm04.local ceph-mon[53345]: pgmap v303: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:39.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:21:39.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:21:39.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:39 vm04.local ceph-mon[53345]: pgmap v304: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 199 B/s rd, 399 B/s wr, 0 op/s
2026-04-15T14:21:39.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:39 vm04.local ceph-mon[53345]: pgmap v305: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 248 B/s rd, 496 B/s wr, 0 op/s
2026-04-15T14:21:39.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:21:39.365 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:21:39.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:39 vm05.local ceph-mon[57841]: pgmap v303: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:21:39.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:21:39.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:21:39.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:39 vm05.local ceph-mon[57841]: pgmap v304: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 199 B/s rd, 399 B/s wr, 0 op/s
2026-04-15T14:21:39.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:39 vm05.local ceph-mon[57841]: pgmap v305: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 248 B/s rd, 496 B/s wr, 0 op/s
2026-04-15T14:21:39.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:21:39.441 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:21:41.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:41 vm04.local ceph-mon[53345]: pgmap v306: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 248 B/s rd, 496 B/s wr, 0 op/s
2026-04-15T14:21:41.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:41 vm05.local ceph-mon[57841]: pgmap v306: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 248 B/s rd, 496 B/s wr, 0 op/s
2026-04-15T14:21:43.277 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:21:43.473 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:21:43.473 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 65s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:21:43.473 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 65s ago 7m - -
2026-04-15T14:21:43.473 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (7m) 2m ago 7m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:21:43.473 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 2m ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:21:43.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:43 vm04.local ceph-mon[53345]: pgmap v307: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:43.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:43 vm05.local ceph-mon[57841]: pgmap v307: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:43.708 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:21:43.709 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:21:43.709 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:21:44.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:44 vm04.local ceph-mon[53345]: from='client.15712 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:44.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:44 vm04.local ceph-mon[53345]: from='client.15716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:44.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:44 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2231508403' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:44.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:44 vm05.local ceph-mon[57841]: from='client.15712 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:44.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:44 vm05.local ceph-mon[57841]: from='client.15716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:44.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:44 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2231508403' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:45.592 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:45 vm05.local ceph-mon[57841]: pgmap v308: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:45.592 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:45 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:21:45.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:45 vm04.local ceph-mon[53345]: pgmap v308: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:45.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:45 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:21:47.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:47 vm04.local ceph-mon[53345]: pgmap v309: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:47.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:47 vm05.local ceph-mon[57841]: pgmap v309: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:48.917 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:21:49.101 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:21:49.102 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 71s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:21:49.102 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 71s ago 7m - -
2026-04-15T14:21:49.102 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (7m) 2m ago 7m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:21:49.102 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (7m) 2m ago 7m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:21:49.275 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:49 vm04.local ceph-mon[53345]: pgmap v310: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:49.327 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:21:49.327 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:21:49.327 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:21:49.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:49 vm05.local ceph-mon[57841]: pgmap v310: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:50.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:50 vm04.local ceph-mon[53345]: from='client.15724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:50.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:50 vm04.local ceph-mon[53345]: from='client.15728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:50.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:50 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3306733198' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:50.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:50 vm05.local ceph-mon[57841]: from='client.15724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:50.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:50 vm05.local ceph-mon[57841]: from='client.15728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:50.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:50 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3306733198' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:51.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:51 vm04.local ceph-mon[53345]: pgmap v311: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:51.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:51 vm05.local ceph-mon[57841]: pgmap v311: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:53.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:53 vm04.local ceph-mon[53345]: pgmap v312: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:53.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:53 vm05.local ceph-mon[57841]: pgmap v312: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:21:54.530 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:21:54.702 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:21:54.702 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 77s ago 7m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:21:54.702 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 77s ago 8m - -
2026-04-15T14:21:54.702 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 2m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:21:54.702 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 2m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:21:54.923 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:21:54.923 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:21:54.923 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:21:55.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:55 vm04.local ceph-mon[53345]: pgmap v313: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:55.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:55 vm04.local ceph-mon[53345]: from='client.15736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:55.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:55 vm04.local ceph-mon[53345]: from='client.15740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:55.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:55 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4022046178' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:55.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:55 vm05.local ceph-mon[57841]: pgmap v313: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:55.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:55 vm05.local ceph-mon[57841]: from='client.15736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:55.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:55 vm05.local ceph-mon[57841]: from='client.15740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:21:55.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:55 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4022046178' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:21:57.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:57 vm05.local ceph-mon[57841]: pgmap v314: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:57.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:57 vm04.local ceph-mon[53345]: pgmap v314: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:59.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:59 vm05.local ceph-mon[57841]: pgmap v315: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:59.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:21:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:21:59.718 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:59 vm04.local ceph-mon[53345]: pgmap v315: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:21:59.718 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:21:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:22:00.130 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:00.315 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:00.315 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 82s ago 8m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:00.315 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 82s ago 8m - -
2026-04-15T14:22:00.315 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 2m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:00.315 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 2m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:00.561 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:00.561 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:00.561 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:01 vm05.local ceph-mon[57841]: from='client.15748 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:01 vm05.local ceph-mon[57841]: pgmap v316: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:01 vm05.local ceph-mon[57841]: from='client.15752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:01.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:01 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2585440485' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:01.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:01 vm04.local ceph-mon[53345]: from='client.15748 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:01.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:01 vm04.local ceph-mon[53345]: pgmap v316: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:01.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:01 vm04.local ceph-mon[53345]: from='client.15752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:01.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:01 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2585440485' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:03.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:03 vm05.local ceph-mon[57841]: pgmap v317: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:03.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:03 vm04.local ceph-mon[53345]: pgmap v317: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:05.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:05 vm05.local ceph-mon[57841]: pgmap v318: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:22:05.754 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:05.754 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:05 vm04.local ceph-mon[53345]: pgmap v318: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:22:05.933 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:05.933 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 88s ago 8m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:05.933 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 88s ago 8m - -
2026-04-15T14:22:05.933 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 2m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:05.933 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 2m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:06.151 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:06.151 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:06.151 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:06.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:06 vm04.local ceph-mon[53345]: from='client.15760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:06.616 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:06 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2487502804' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:06.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:06 vm05.local ceph-mon[57841]: from='client.15760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:06.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:06 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2487502804' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:07.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:07 vm05.local ceph-mon[57841]: from='client.15764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:07.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:07 vm05.local ceph-mon[57841]: pgmap v319: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:07.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:07 vm04.local ceph-mon[53345]: from='client.15764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:07.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:07 vm04.local ceph-mon[53345]: pgmap v319: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:09.690 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:09 vm05.local ceph-mon[57841]: pgmap v320: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:09.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:09 vm04.local ceph-mon[53345]: pgmap v320: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:11.356 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:11.537 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:11.537 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 93s ago 8m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:11.537 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 93s ago 8m - -
2026-04-15T14:22:11.537 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 2m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:11.537 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 2m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:11.615 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:11 vm04.local ceph-mon[53345]: pgmap v321: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:11.690 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:11 vm05.local ceph-mon[57841]: pgmap v321: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:11.767 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:11.767 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:11.768 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:12.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:12 vm05.local ceph-mon[57841]: from='client.15772 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:12.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:12 vm05.local ceph-mon[57841]: from='client.15776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:12.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:12 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3795424183' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:12.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:12 vm04.local ceph-mon[53345]: from='client.15772 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:12.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:12 vm04.local ceph-mon[53345]: from='client.15776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:12.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:12 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3795424183' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:13.690 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:13 vm05.local ceph-mon[57841]: pgmap v322: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:13.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:13 vm04.local ceph-mon[53345]: pgmap v322: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:14.690 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:22:14.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:22:15.690 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:15 vm05.local ceph-mon[57841]: pgmap v323: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:15.865 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:15 vm04.local ceph-mon[53345]: pgmap v323: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:16.987 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:17.167 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:17.167 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (2m) 99s ago 8m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:17.167 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 99s ago 8m - -
2026-04-15T14:22:17.167 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 2m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:17.167 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 2m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:17.407 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:17.407 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:17.407 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:17.690 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:17 vm05.local ceph-mon[57841]: pgmap v324: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:17.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:17 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3670443091' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:17.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:17 vm04.local ceph-mon[53345]: pgmap v324: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:17.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:17 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3670443091' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:18.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:18 vm04.local ceph-mon[53345]: from='client.15784 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:18.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:18 vm04.local ceph-mon[53345]: from='client.15788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:18.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:18 vm05.local ceph-mon[57841]: from='client.15784 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:18.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:18 vm05.local ceph-mon[57841]: from='client.15788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:19.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:19 vm04.local ceph-mon[53345]: pgmap v325: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:19.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:19 vm05.local ceph-mon[57841]: pgmap v325: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:21.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:21 vm04.local ceph-mon[53345]: pgmap v326: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:21.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:21 vm05.local ceph-mon[57841]: pgmap v326: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:22.609 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:22.786 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:22.786 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3m) 105s ago 8m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:22.786 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 105s ago 8m - -
2026-04-15T14:22:22.786 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 3m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:22.786 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 3m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:23.018 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:23.018 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:23.019 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:23.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:23 vm04.local ceph-mon[53345]: pgmap v327: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:23.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:23 vm04.local ceph-mon[53345]: from='client.15796 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:23.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:23 vm04.local ceph-mon[53345]: from='client.15800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:23.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:23 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2968360364' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:23.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:23 vm05.local ceph-mon[57841]: pgmap v327: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:23.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:23 vm05.local ceph-mon[57841]: from='client.15796 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:23.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:23 vm05.local ceph-mon[57841]: from='client.15800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:23.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:23 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2968360364' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:25.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:25 vm04.local ceph-mon[53345]: pgmap v328: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:25.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:25 vm05.local ceph-mon[57841]: pgmap v328: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:27.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:27 vm04.local ceph-mon[53345]: pgmap v329: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:27.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:27 vm05.local ceph-mon[57841]: pgmap v329: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:28.229 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:28.417 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:28.417 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3m) 110s ago 8m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:28.418 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 110s ago 8m - -
2026-04-15T14:22:28.418 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 3m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:28.418 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 3m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:28.652 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:28.652 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:28.652 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:29.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:29 vm04.local ceph-mon[53345]: pgmap v330: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:29.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:29 vm04.local ceph-mon[53345]: from='client.15808 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:29.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:29 vm04.local ceph-mon[53345]: from='client.15812 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:29.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:29 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/189519592' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:29.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:22:29.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:29 vm05.local ceph-mon[57841]: pgmap v330: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:29.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:29 vm05.local ceph-mon[57841]: from='client.15808 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:29.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:29 vm05.local ceph-mon[57841]: from='client.15812 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:29.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:29 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/189519592' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:29.941 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:22:31.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:31 vm04.local ceph-mon[53345]: pgmap v331: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:31.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:31 vm05.local ceph-mon[57841]: pgmap v331: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:33.863 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:33.864 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:33 vm04.local ceph-mon[53345]: pgmap v332: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:33.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:33 vm05.local ceph-mon[57841]: pgmap v332: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:34.062 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:34.063 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3m) 116s ago 8m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:34.063 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 116s ago 8m - -
2026-04-15T14:22:34.063 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 3m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:34.063 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 3m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:34.301 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:34.301 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:34.301 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:34.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:34 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1614831401' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:34.940 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:34 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1614831401' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:36.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:35 vm04.local ceph-mon[53345]: from='client.15820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:36.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:35 vm04.local ceph-mon[53345]: from='client.15824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:36.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:35 vm04.local ceph-mon[53345]: pgmap v333: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:22:36.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:35 vm05.local ceph-mon[57841]: from='client.15820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:36.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:35 vm05.local ceph-mon[57841]: from='client.15824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:36.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:35 vm05.local ceph-mon[57841]: pgmap v333: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:22:38.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:37 vm04.local ceph-mon[53345]: pgmap v334: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:38.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:37 vm05.local ceph-mon[57841]: pgmap v334: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:39.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:22:39.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:22:39.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:22:39.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:22:39.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:22:39.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:22:39.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:22:39.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:22:39.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:22:39.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:22:39.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:22:39.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:22:39.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:22:39.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:22:39.500 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:39.674 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:39.674 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3m) 2m ago 8m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:39.674 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 8m - -
2026-04-15T14:22:39.674 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 3m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:39.674 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 3m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:39.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:39 vm04.local ceph-mon[53345]: pgmap v335: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:39.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:39 vm04.local ceph-mon[53345]: pgmap v336: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 195 B/s rd, 391 B/s wr, 0 op/s
2026-04-15T14:22:39.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:39 vm04.local ceph-mon[53345]: pgmap v337: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 241 B/s rd, 483 B/s wr, 0 op/s
2026-04-15T14:22:39.910 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:39.910 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:39.910 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:40.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:39 vm05.local ceph-mon[57841]: pgmap v335: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:22:40.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:39 vm05.local ceph-mon[57841]: pgmap v336: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 195 B/s rd, 391 B/s wr, 0 op/s
2026-04-15T14:22:40.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:39 vm05.local ceph-mon[57841]: pgmap v337: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 241 B/s rd, 483 B/s wr, 0 op/s
2026-04-15T14:22:41.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:40 vm04.local ceph-mon[53345]: from='client.15832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:41.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:40 vm04.local ceph-mon[53345]: from='client.15836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:41.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:40 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4249691114' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:41.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:40 vm05.local ceph-mon[57841]: from='client.15832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:41.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:40 vm05.local ceph-mon[57841]: from='client.15836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:41.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:40 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4249691114' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:42.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:41 vm04.local ceph-mon[53345]: pgmap v338: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 241 B/s rd, 483 B/s wr, 0 op/s
2026-04-15T14:22:42.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:41 vm05.local ceph-mon[57841]: pgmap v338: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 241 B/s rd, 483 B/s wr, 0 op/s
2026-04-15T14:22:44.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:43 vm04.local ceph-mon[53345]: pgmap v339: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:44.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:43 vm05.local ceph-mon[57841]: pgmap v339: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:45.091 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:22:45.114 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:22:45.114 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:45.297 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:45.297 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3m) 2m ago 8m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:45.297 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 8m - -
2026-04-15T14:22:45.297 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 3m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:45.297 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 3m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:45.521 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:45.521 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:45.521 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:45.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:45 vm04.local ceph-mon[53345]: pgmap v340: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:45.866 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:45 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/191287688' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:46.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:45 vm05.local ceph-mon[57841]: pgmap v340: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:46.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:45 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/191287688' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:47.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:46 vm04.local ceph-mon[53345]: from='client.15844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:47.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:46 vm04.local ceph-mon[53345]: from='client.15848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:47.190 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:46 vm05.local ceph-mon[57841]: from='client.15844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:47.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:46 vm05.local ceph-mon[57841]: from='client.15848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:48.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:47 vm04.local ceph-mon[53345]: pgmap v341: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:48.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:47 vm05.local ceph-mon[57841]: pgmap v341: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:50.116 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:49 vm04.local ceph-mon[53345]: pgmap v342: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:50.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:49 vm05.local ceph-mon[57841]: pgmap v342: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:50.745 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:50.942 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:50.942 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3m) 2m ago 8m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:50.942 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 8m - -
2026-04-15T14:22:50.942 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (8m) 3m ago 8m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:50.942 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (8m) 3m ago 8m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:51.184 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:51.184 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:51.185 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:52.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:51 vm04.local ceph-mon[53345]: pgmap v343: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:52.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:51 vm04.local ceph-mon[53345]: from='client.15856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:52.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:51 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3190236855' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:52.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:51 vm05.local ceph-mon[57841]: pgmap v343: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:52.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:51 vm05.local ceph-mon[57841]: from='client.15856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:52.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:51 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3190236855' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:53.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:52 vm04.local ceph-mon[53345]: from='client.15860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:53.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:52 vm05.local ceph-mon[57841]: from='client.15860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:54.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:53 vm04.local ceph-mon[53345]: pgmap v344: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:54.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:53 vm05.local ceph-mon[57841]: pgmap v344: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:22:56.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:55 vm04.local ceph-mon[53345]: pgmap v345: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:56.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:55 vm05.local ceph-mon[57841]: pgmap v345: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:56.399 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:22:56.594 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:22:56.595 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3m) 2m ago 9m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:22:56.595 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 9m - -
2026-04-15T14:22:56.595 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 3m ago 9m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:22:56.595 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (9m) 3m ago 9m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:22:56.840 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:22:56.840 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:22:56.840 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:22:58.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:57 vm04.local ceph-mon[53345]: from='client.15868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:58.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:57 vm04.local ceph-mon[53345]: from='client.15872 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:58.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:57 vm04.local ceph-mon[53345]: pgmap v346: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:58.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:57 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1941337198' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:22:58.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:57 vm05.local ceph-mon[57841]: from='client.15868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:58.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:57 vm05.local ceph-mon[57841]: from='client.15872 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:22:58.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:57 vm05.local ceph-mon[57841]: pgmap v346: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:22:58.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:57 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1941337198' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:23:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:59 vm04.local ceph-mon[53345]: pgmap v347: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:23:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:22:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:23:00.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:59 vm05.local ceph-mon[57841]: pgmap v347: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:23:00.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:22:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:23:02.075 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:23:02.075 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:01 vm04.local ceph-mon[53345]: pgmap v348: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:23:02.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:01 vm05.local ceph-mon[57841]: pgmap v348: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:23:02.269 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:23:02.269 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3m) 2m ago 9m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:23:02.269 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 9m - -
2026-04-15T14:23:02.269 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 3m ago 9m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:23:02.269 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (9m) 3m ago 9m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:23:02.513 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:23:02.513 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:23:02.513 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:23:03.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:02 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1967399756' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:23:03.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:02 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1967399756' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:23:04.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:03 vm04.local ceph-mon[53345]: from='client.15880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:23:04.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:03 vm04.local ceph-mon[53345]: from='client.15884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:23:04.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:03 vm04.local ceph-mon[53345]: pgmap v349: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:23:04.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:03 vm05.local ceph-mon[57841]: from='client.15880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:23:04.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:03 vm05.local ceph-mon[57841]: from='client.15884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:23:04.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:03 vm05.local ceph-mon[57841]: pgmap v349: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:23:06.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:05 vm04.local ceph-mon[53345]: pgmap v350: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:23:06.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:05 vm05.local ceph-mon[57841]: pgmap v350: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-15T14:23:07.726 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:23:07.926 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:23:07.926 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3m) 2m ago 9m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:23:07.926 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 9m - -
2026-04-15T14:23:07.926 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 3m ago 9m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:23:07.926 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (9m) 3m ago 9m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:23:08.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:07 vm04.local ceph-mon[53345]: pgmap v351: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:23:08.160 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:23:08.160 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:23:08.160 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:23:08.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:07 vm05.local ceph-mon[57841]: pgmap v351: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:23:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:08 vm04.local ceph-mon[53345]: from='client.15892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:23:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:08 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2824749216' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:23:09.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:08 vm05.local ceph-mon[57841]: from='client.15892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:23:09.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:08 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2824749216' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:23:10.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:09 vm04.local ceph-mon[53345]: from='client.15896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:23:10.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:09 vm04.local ceph-mon[53345]: pgmap v352: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:23:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:09 vm05.local ceph-mon[57841]: from='client.15896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:23:10.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:09 vm05.local ceph-mon[57841]: pgmap v352: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:23:12.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:11 vm04.local ceph-mon[53345]: pgmap v353: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:23:12.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:11 vm05.local ceph-mon[57841]: pgmap v353: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:23:13.379 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:23:13.573 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:23:13.573 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (3m) 2m ago 9m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:23:13.574 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 9m - -
2026-04-15T14:23:13.574 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 3m ago 9m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:23:13.574 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (9m) 3m ago 9m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:23:13.817 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:23:13.817 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:23:13.817 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:23:14.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:13 vm04.local ceph-mon[53345]: pgmap v354: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:23:14.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:13 vm04.local ceph-mon[53345]: from='client.?
192.168.123.104:0/1642349077' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:13 vm05.local ceph-mon[57841]: pgmap v354: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:23:14.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:13 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1642349077' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:15.092 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:14 vm05.local ceph-mon[57841]: from='client.15904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:15.093 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:14 vm05.local ceph-mon[57841]: from='client.15908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:15.093 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:23:15.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:14 vm04.local ceph-mon[53345]: from='client.15904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:15.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:14 vm04.local ceph-mon[53345]: from='client.15908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:23:16.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:15 vm04.local ceph-mon[53345]: pgmap v355: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:23:16.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:15 vm05.local ceph-mon[57841]: pgmap v355: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:23:17.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:16 vm05.local ceph-mon[57841]: pgmap v356: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:17.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:16 vm04.local ceph-mon[53345]: pgmap v356: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:19.033 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:23:19.230 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:23:19.231 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 2m ago 9m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:23:19.231 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 9m - - 2026-04-15T14:23:19.231 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 3m ago 9m 125M - 20.2.0-19-g7ec4401a095 
259950fb12cb 3d5f647f7754 2026-04-15T14:23:19.231 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (9m) 3m ago 9m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:23:19.479 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:23:19.479 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:23:19.479 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state 2026-04-15T14:23:20.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:19 vm04.local ceph-mon[53345]: pgmap v357: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:20.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:19 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2124611490' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:20.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:19 vm05.local ceph-mon[57841]: pgmap v357: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:20.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:19 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2124611490' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:21.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:20 vm04.local ceph-mon[53345]: from='client.15916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:21.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:20 vm04.local ceph-mon[53345]: from='client.15920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:21.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:20 vm05.local ceph-mon[57841]: from='client.15916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:21.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:20 vm05.local ceph-mon[57841]: from='client.15920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:22.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:21 vm04.local ceph-mon[53345]: pgmap v358: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:22.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:21 vm05.local ceph-mon[57841]: pgmap v358: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:24.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:23 vm04.local ceph-mon[53345]: pgmap v359: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:24.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:23 vm05.local ceph-mon[57841]: pgmap v359: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:24.695 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:23:24.887 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:23:24.887 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 2m ago 9m 106M - 20.2.0-19-g7ec4401a095 
259950fb12cb f4e5cc12214d 2026-04-15T14:23:24.887 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 9m - - 2026-04-15T14:23:24.887 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 4m ago 9m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:23:24.887 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (9m) 4m ago 9m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:23:25.136 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:23:25.136 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:23:25.136 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state 2026-04-15T14:23:26.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:25 vm04.local ceph-mon[53345]: from='client.15928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:26.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:25 vm04.local ceph-mon[53345]: pgmap v360: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:26.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:25 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2941610875' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:26.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:25 vm05.local ceph-mon[57841]: from='client.15928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:26.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:25 vm05.local ceph-mon[57841]: pgmap v360: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:26.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:25 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/2941610875' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:27.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:26 vm04.local ceph-mon[53345]: from='client.15932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:27.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:26 vm05.local ceph-mon[57841]: from='client.15932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:28.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:27 vm04.local ceph-mon[53345]: pgmap v361: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:28.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:27 vm05.local ceph-mon[57841]: pgmap v361: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:29 vm04.local ceph-mon[53345]: pgmap v362: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:23:30.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:29 vm05.local ceph-mon[57841]: pgmap v362: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:30.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:23:30.354 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:23:30.546 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:23:30.546 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 2m ago 9m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:23:30.546 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 9m - - 2026-04-15T14:23:30.546 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 4m ago 9m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:23:30.546 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (9m) 4m ago 9m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:23:30.808 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:23:30.808 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:23:30.808 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state 2026-04-15T14:23:32.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:31 vm04.local ceph-mon[53345]: from='client.15940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:32.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 
14:23:31 vm04.local ceph-mon[53345]: from='client.15944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:32.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:31 vm04.local ceph-mon[53345]: pgmap v363: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:32.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:31 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/11691270' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:32.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:31 vm05.local ceph-mon[57841]: from='client.15940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:32.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:31 vm05.local ceph-mon[57841]: from='client.15944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:32.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:31 vm05.local ceph-mon[57841]: pgmap v363: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:32.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:31 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/11691270' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:34.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:33 vm04.local ceph-mon[53345]: pgmap v364: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:34.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:33 vm05.local ceph-mon[57841]: pgmap v364: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:36.028 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:23:36.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:35 vm04.local ceph-mon[53345]: pgmap v365: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:23:36.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:35 vm05.local ceph-mon[57841]: pgmap v365: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T14:23:36.207 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:23:36.208 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 2m ago 9m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:23:36.208 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 2m ago 9m - - 2026-04-15T14:23:36.208 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 4m ago 9m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:23:36.208 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (9m) 4m ago 9m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:23:36.465 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:23:36.466 
INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:23:36.466 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state 2026-04-15T14:23:37.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:36 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2253448689' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:37.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:36 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2253448689' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:38.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:37 vm04.local ceph-mon[53345]: from='client.15952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:38.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:37 vm04.local ceph-mon[53345]: from='client.15956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:38.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:37 vm04.local ceph-mon[53345]: pgmap v366: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:23:38.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:37 vm05.local ceph-mon[57841]: from='client.15952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:38.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:37 vm05.local ceph-mon[57841]: from='client.15956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:38.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:37 vm05.local ceph-mon[57841]: pgmap v366: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:23:39.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:38 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:23:39.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:38 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:23:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:39 vm04.local ceph-mon[53345]: pgmap v367: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:23:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:23:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:23:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch 2026-04-15T14:23:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:39 vm04.local ceph-mon[53345]: from='mgr.14231 
192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"} : dispatch 2026-04-15T14:23:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:23:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:23:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:23:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:23:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:39 vm05.local ceph-mon[57841]: pgmap v367: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:23:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:23:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:23:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch 2026-04-15T14:23:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"} : dispatch 2026-04-15T14:23:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:23:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:23:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:23:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:23:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:40 vm04.local ceph-mon[53345]: pgmap v368: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 194 B/s rd, 389 B/s wr, 0 op/s 2026-04-15T14:23:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:40 vm04.local ceph-mon[53345]: pgmap v369: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 
160 GiB / 160 GiB avail; 240 B/s rd, 481 B/s wr, 0 op/s 2026-04-15T14:23:41.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:40 vm05.local ceph-mon[57841]: pgmap v368: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 194 B/s rd, 389 B/s wr, 0 op/s 2026-04-15T14:23:41.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:40 vm05.local ceph-mon[57841]: pgmap v369: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 240 B/s rd, 481 B/s wr, 0 op/s 2026-04-15T14:23:41.690 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:23:41.901 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:23:41.901 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 3m ago 9m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:23:41.901 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3m ago 9m - - 2026-04-15T14:23:41.901 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 4m ago 9m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:23:41.901 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (9m) 4m ago 9m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:23:42.159 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:23:42.159 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:23:42.159 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state 2026-04-15T14:23:43.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:42 vm04.local ceph-mon[53345]: pgmap v370: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 240 B/s rd, 481 B/s wr, 0 op/s 2026-04-15T14:23:43.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:42 vm04.local ceph-mon[53345]: from='client.15964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:43.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:42 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/590607951' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:43.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:42 vm05.local ceph-mon[57841]: pgmap v370: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 240 B/s rd, 481 B/s wr, 0 op/s 2026-04-15T14:23:43.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:42 vm05.local ceph-mon[57841]: from='client.15964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:43.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:42 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/590607951' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:44.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:43 vm04.local ceph-mon[53345]: from='client.15968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:44.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:43 vm04.local ceph-mon[53345]: pgmap v371: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:44.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:43 vm05.local ceph-mon[57841]: from='client.15968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:44.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:43 vm05.local ceph-mon[57841]: pgmap v371: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:45.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:23:45.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:23:46.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:45 vm04.local ceph-mon[53345]: pgmap v372: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:46.191 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:45 vm05.local ceph-mon[57841]: pgmap v372: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:47.385 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:23:47.575 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:23:47.575 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 3m ago 9m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:23:47.575 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3m ago 9m - - 2026-04-15T14:23:47.575 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 4m ago 9m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:23:47.575 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (9m) 4m ago 9m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:23:47.821 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:23:47.821 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:23:47.822 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state 2026-04-15T14:23:48.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:48 vm04.local ceph-mon[53345]: pgmap v373: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:48.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:48 vm04.local ceph-mon[53345]: from='client.15976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 
2026-04-15T14:23:48.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:48 vm04.local ceph-mon[53345]: from='client.15980 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:48.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:48 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1962372038' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:48.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:48 vm05.local ceph-mon[57841]: pgmap v373: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:48.691 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:48 vm05.local ceph-mon[57841]: from='client.15976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:48.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:48 vm05.local ceph-mon[57841]: from='client.15980 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:48.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:48 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1962372038' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:50.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:50 vm04.local ceph-mon[53345]: pgmap v374: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:50.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:50 vm05.local ceph-mon[57841]: pgmap v374: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:52.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:52 vm04.local ceph-mon[53345]: pgmap v375: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:52.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:52 vm05.local ceph-mon[57841]: pgmap v375: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:53.044 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:23:53.237 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:23:53.237 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 3m ago 9m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:23:53.237 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3m ago 9m - - 2026-04-15T14:23:53.237 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (9m) 4m ago 9m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:23:53.237 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 4m ago 10m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:23:53.476 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:23:53.476 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:23:53.476 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state 2026-04-15T14:23:54.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:54 vm04.local ceph-mon[53345]: from='client.15988 
-' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:54.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:54 vm04.local ceph-mon[53345]: pgmap v376: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:54.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:54 vm04.local ceph-mon[53345]: from='client.15992 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:54.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:54 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1678007908' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:54.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:54 vm05.local ceph-mon[57841]: from='client.15988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:54.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:54 vm05.local ceph-mon[57841]: pgmap v376: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:23:54.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:54 vm05.local ceph-mon[57841]: from='client.15992 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:54.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:54 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1678007908' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:56.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:56 vm04.local ceph-mon[53345]: pgmap v377: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:56.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:56 vm05.local ceph-mon[57841]: pgmap v377: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:58.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:58 vm05.local ceph-mon[57841]: pgmap v378: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:58.701 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:23:58.701 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:58 vm04.local ceph-mon[53345]: pgmap v378: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:23:58.900 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:23:58.900 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 3m ago 10m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:23:58.900 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3m ago 10m - - 2026-04-15T14:23:58.900 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (10m) 4m ago 10m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:23:58.900 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 4m ago 10m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:23:59.155 
INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:23:59.155 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:23:59.155 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state 2026-04-15T14:23:59.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:59 vm04.local ceph-mon[53345]: from='client.16000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:59.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:59 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1196322120' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:59.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:23:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:23:59.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:59 vm05.local ceph-mon[57841]: from='client.16000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:23:59.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:59 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1196322120' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:23:59.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:23:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:24:00.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:00 vm05.local ceph-mon[57841]: from='client.16004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:24:00.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:00 vm05.local ceph-mon[57841]: pgmap v379: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:24:00.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:00 vm04.local ceph-mon[53345]: from='client.16004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:24:00.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:00 vm04.local ceph-mon[53345]: pgmap v379: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:24:02.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:02 vm04.local ceph-mon[53345]: pgmap v380: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:24:02.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:02 vm05.local ceph-mon[57841]: pgmap v380: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:24:04.386 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:24:04.607 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:24:04.607 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 3m ago 10m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb 
f4e5cc12214d 2026-04-15T14:24:04.607 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3m ago 10m - - 2026-04-15T14:24:04.607 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (10m) 4m ago 10m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:24:04.607 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 4m ago 10m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:24:04.857 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:24:04.857 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:24:04.857 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state 2026-04-15T14:24:04.857 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:04 vm04.local ceph-mon[53345]: pgmap v381: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:24:04.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:04 vm05.local ceph-mon[57841]: pgmap v381: 129 pgs: 129 active+clean; 587 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:24:05.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:05 vm04.local ceph-mon[53345]: from='client.16012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:24:05.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:05 vm04.local ceph-mon[53345]: from='client.25311 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:24:05.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:05 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3905970073' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:24:05.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:05 vm05.local ceph-mon[57841]: from='client.16012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:24:05.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:05 vm05.local ceph-mon[57841]: from='client.25311 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:24:05.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:05 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/3905970073' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:24:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:06 vm04.local ceph-mon[53345]: pgmap v382: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 42 KiB/s rd, 511 B/s wr, 70 op/s 2026-04-15T14:24:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:06 vm05.local ceph-mon[57841]: pgmap v382: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 42 KiB/s rd, 511 B/s wr, 70 op/s 2026-04-15T14:24:08.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:08 vm04.local ceph-mon[53345]: pgmap v383: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 341 B/s wr, 119 op/s 2026-04-15T14:24:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:08 vm05.local ceph-mon[57841]: pgmap v383: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 341 B/s wr, 119 op/s 2026-04-15T14:24:10.092 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop 2026-04-15T14:24:10.333 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:24:10.334 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 3m ago 10m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:24:10.334 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3m ago 10m - - 2026-04-15T14:24:10.334 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (10m) 4m ago 10m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754 2026-04-15T14:24:10.334 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 4m ago 10m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45 2026-04-15T14:24:10.607 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:24:10.607 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:24:10.607 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state 2026-04-15T14:24:10.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:10 vm04.local ceph-mon[53345]: pgmap v384: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 341 B/s wr, 119 op/s 2026-04-15T14:24:10.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:10 vm05.local ceph-mon[57841]: pgmap v384: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 341 B/s wr, 119 op/s 2026-04-15T14:24:11.769 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:11 vm04.local ceph-mon[53345]: from='client.25319 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:24:11.769 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:11 vm04.local ceph-mon[53345]: from='client.16026 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:24:11.769 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:11 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/2888686573' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:11 vm05.local ceph-mon[57841]: from='client.25319 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:11 vm05.local ceph-mon[57841]: from='client.16026 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:11.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:11 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2888686573' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:12.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:12 vm04.local ceph-mon[53345]: pgmap v385: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 341 B/s wr, 119 op/s
2026-04-15T14:24:12.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:12 vm05.local ceph-mon[57841]: pgmap v385: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 341 B/s wr, 119 op/s
2026-04-15T14:24:14.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:14 vm04.local ceph-mon[53345]: pgmap v386: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 341 B/s wr, 119 op/s
2026-04-15T14:24:14.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:24:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:14 vm05.local ceph-mon[57841]: pgmap v386: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 341 B/s wr, 119 op/s
2026-04-15T14:24:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:24:15.851 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:24:16.073 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:24:16.073 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (4m) 3m ago 10m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:24:16.073 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3m ago 10m - -
2026-04-15T14:24:16.073 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (10m) 4m ago 10m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:24:16.073 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 4m ago 10m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:24:16.320 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:24:16.320 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:24:16.320 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:24:16.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:16 vm04.local ceph-mon[53345]: pgmap v387: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 341 B/s wr, 119 op/s
2026-04-15T14:24:16.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:16 vm04.local ceph-mon[53345]: from='client.16034 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:16.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:16 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3511885219' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:16.916 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:16 vm05.local ceph-mon[57841]: pgmap v387: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 341 B/s wr, 119 op/s
2026-04-15T14:24:16.916 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:16 vm05.local ceph-mon[57841]: from='client.16034 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:16.916 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:16 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3511885219' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:17.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:17 vm04.local ceph-mon[53345]: from='client.16038 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:17.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:17 vm05.local ceph-mon[57841]: from='client.16038 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:18.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:18 vm04.local ceph-mon[53345]: pgmap v388: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 49 op/s
2026-04-15T14:24:18.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:18 vm05.local ceph-mon[57841]: pgmap v388: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 0 B/s wr, 49 op/s
2026-04-15T14:24:20.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:20 vm04.local ceph-mon[53345]: pgmap v389: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:24:20.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:20 vm05.local ceph-mon[57841]: pgmap v389: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:24:21.539 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to stop
2026-04-15T14:24:21.745 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:24:21.745 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 3m ago 10m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:24:21.745 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3m ago 10m - -
2026-04-15T14:24:21.745 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (10m) 5m ago 10m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:24:21.745 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 5m ago 10m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:24:22.018 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:24:22.018 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:24:22.018 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:24:22.416 INFO:teuthology.orchestra.run.vm04.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T14:24:22.416 INFO:teuthology.orchestra.run.vm04.stderr: Dload Upload Total Spent Left Speed
2026-04-15T14:24:22.417 INFO:teuthology.orchestra.run.vm04.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T14:24:22.652 INFO:teuthology.orchestra.run.vm04.stdout:anonymousScheduled to start rgw.foo.vm04.owsxoy on host 'vm04'
2026-04-15T14:24:22.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:22 vm04.local ceph-mon[53345]: pgmap v390: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:24:22.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:22 vm04.local ceph-mon[53345]: from='client.16046 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:22.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:22 vm04.local ceph-mon[53345]: from='client.16050 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:22.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:22 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3283208340' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:22.901 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm04.owsxoy to start
2026-04-15T14:24:22.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:22 vm05.local ceph-mon[57841]: pgmap v390: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:24:22.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:22 vm05.local ceph-mon[57841]: from='client.16046 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:22.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:22 vm05.local ceph-mon[57841]: from='client.16050 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:22.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:22 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3283208340' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:23.106 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:24:23.106 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 3m ago 10m 106M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:24:23.106 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 error 3m ago 10m - -
2026-04-15T14:24:23.106 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (10m) 5m ago 10m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:24:23.106 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 5m ago 10m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:24:23.369 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:24:23.370 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:24:23.370 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm04.owsxoy on vm04 is in error state
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='client.16058 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.owsxoy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: Schedule start daemon rgw.foo.vm04.owsxoy
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: pgmap v391: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:23 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3239475373' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:24.050 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='client.16058 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.owsxoy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:24.050 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: Schedule start daemon rgw.foo.vm04.owsxoy
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: pgmap v391: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:24.051 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:23 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3239475373' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:24.764 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:24 vm04.local ceph-mon[53345]: from='client.16062 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:24.764 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:24 vm04.local ceph-mon[53345]: from='client.16066 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:24.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:24 vm05.local ceph-mon[57841]: from='client.16062 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:24.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:24 vm05.local ceph-mon[57841]: from='client.16066 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:25.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:25 vm04.local ceph-mon[53345]: pgmap v392: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 178 B/s wr, 28 op/s
2026-04-15T14:24:25.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:25.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:25.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:25.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:25.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:25.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:25 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:25.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:25 vm05.local ceph-mon[57841]: pgmap v392: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 178 B/s wr, 28 op/s
2026-04-15T14:24:25.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:25.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:25.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:25.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:25.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:25.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:25 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:27.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:26 vm04.local ceph-mon[53345]: pgmap v393: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 205 B/s wr, 32 op/s
2026-04-15T14:24:27.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:26 vm04.local ceph-mon[53345]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T14:24:27.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:26 vm04.local ceph-mon[53345]: Cluster is now healthy
2026-04-15T14:24:27.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:26 vm05.local ceph-mon[57841]: pgmap v393: 129 pgs: 129 active+clean; 587 KiB data, 233 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 205 B/s wr, 32 op/s
2026-04-15T14:24:27.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:26 vm05.local ceph-mon[57841]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T14:24:27.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:26 vm05.local ceph-mon[57841]: Cluster is now healthy
2026-04-15T14:24:28.608 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5s) 3s ago 10m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:24:28.826 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled to stop rgw.foo.vm05.dhvjjs on host 'vm05'
2026-04-15T14:24:28.869 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:28 vm04.local ceph-mon[53345]: pgmap v394: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 205 B/s wr, 90 op/s
2026-04-15T14:24:29.057 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:28 vm05.local ceph-mon[57841]: pgmap v394: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 205 B/s wr, 90 op/s
2026-04-15T14:24:29.078 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.dhvjjs to stop
2026-04-15T14:24:29.281 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:24:29.281 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 4s ago 10m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:24:29.281 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (6s) 4s ago 10m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:24:29.282 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (10m) 5m ago 10m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:24:29.282 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 5m ago 10m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:24:29.537 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='client.16084 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='client.16088 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm05.dhvjjs", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: Schedule stop daemon rgw.foo.vm05.dhvjjs
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:24:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:29 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2416390621' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='client.16084 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='client.16088 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm05.dhvjjs", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: Schedule stop daemon rgw.foo.vm05.dhvjjs
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:24:30.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:29 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2416390621' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:31.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:30 vm04.local ceph-mon[53345]: from='client.16092 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:31.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:30 vm04.local ceph-mon[53345]: pgmap v395: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 80 KiB/s rd, 205 B/s wr, 131 op/s
2026-04-15T14:24:31.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:30 vm04.local ceph-mon[53345]: from='client.16096 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:31.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:30 vm05.local ceph-mon[57841]: from='client.16092 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:31.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:30 vm05.local ceph-mon[57841]: pgmap v395: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 80 KiB/s rd, 205 B/s wr, 131 op/s
2026-04-15T14:24:31.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:30 vm05.local ceph-mon[57841]: from='client.16096 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:33.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:32 vm04.local ceph-mon[53345]: pgmap v396: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 80 KiB/s rd, 205 B/s wr, 131 op/s
2026-04-15T14:24:33.177 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:32 vm05.local ceph-mon[57841]: pgmap v396: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 80 KiB/s rd, 205 B/s wr, 131 op/s
2026-04-15T14:24:34.765 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.dhvjjs to stop
2026-04-15T14:24:34.977 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:24:34.977 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 9s ago 10m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:24:34.977 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (11s) 9s ago 10m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:24:34.977 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (10m) 5m ago 10m 125M - 20.2.0-19-g7ec4401a095 259950fb12cb 3d5f647f7754
2026-04-15T14:24:34.977 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 5m ago 10m 127M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:24:35.040 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:34 vm04.local ceph-mon[53345]: pgmap v397: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 76 KiB/s rd, 194 B/s wr, 124 op/s
2026-04-15T14:24:35.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:34 vm05.local ceph-mon[57841]: pgmap v397: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 76 KiB/s rd, 194 B/s wr, 124 op/s
2026-04-15T14:24:35.244 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:24:36.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:35 vm04.local ceph-mon[53345]: from='client.16104 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:36.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:35 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1097279492' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:36.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:35 vm05.local ceph-mon[57841]: from='client.16104 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:36.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:35 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1097279492' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:37.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:36 vm04.local ceph-mon[53345]: from='client.16108 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:37.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:36 vm04.local ceph-mon[53345]: pgmap v398: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 389 B/s wr, 93 op/s
2026-04-15T14:24:37.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:36 vm05.local ceph-mon[57841]: from='client.16108 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:37.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:36 vm05.local ceph-mon[57841]: pgmap v398: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 389 B/s wr, 93 op/s
2026-04-15T14:24:38.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:37 vm04.local ceph-mon[53345]: pgmap v399: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 341 B/s wr, 82 op/s
2026-04-15T14:24:38.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:37 vm05.local ceph-mon[57841]: pgmap v399: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 341 B/s wr, 82 op/s
2026-04-15T14:24:39.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:39.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:39.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:39 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:39 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:40.476 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 stopped 0s ago 10m - -
2026-04-15T14:24:40.481 INFO:teuthology.orchestra.run.vm04.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T14:24:40.482 INFO:teuthology.orchestra.run.vm04.stderr: Dload Upload Total Spent Left Speed
2026-04-15T14:24:40.482 INFO:teuthology.orchestra.run.vm04.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T14:24:40.707 INFO:teuthology.orchestra.run.vm04.stdout:anonymousScheduled to start rgw.foo.vm05.dhvjjs on host 'vm05'
2026-04-15T14:24:40.966 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.dhvjjs to start
2026-04-15T14:24:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:40 vm04.local ceph-mon[53345]: pgmap v400: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 341 B/s wr, 34 op/s
2026-04-15T14:24:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:40 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:40 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:40 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:40 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:40 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:40 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:40 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:40 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:40 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:41.184 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:24:41.184 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 16s ago 10m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:24:41.184 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (17s) 16s ago 10m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:24:41.184 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 stopped 1s ago 10m - -
2026-04-15T14:24:41.184 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 1s ago 10m 138M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:24:41.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:40 vm05.local ceph-mon[57841]: pgmap v400: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 341 B/s wr, 34 op/s
2026-04-15T14:24:41.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:40 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:40 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:40 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:41.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:40 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:41.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:40 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:40 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:41.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:40 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:40 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:40 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:41.465 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: from='client.16116 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: from='client.16120 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm05.dhvjjs", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: Schedule start daemon rgw.foo.vm05.dhvjjs
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2772885054' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:41.958 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:41 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: from='client.16116 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: from='client.16120 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm05.dhvjjs", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: Schedule start daemon rgw.foo.vm05.dhvjjs
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2772885054' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:42.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:41 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:42.926 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:42 vm05.local ceph-mon[57841]: from='client.16124 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:42.927 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:42 vm05.local ceph-mon[57841]: from='client.16128 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:42.927 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:42 vm05.local ceph-mon[57841]: pgmap v401: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:24:43.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:42 vm04.local ceph-mon[53345]: from='client.16124 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:43.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:42 vm04.local ceph-mon[53345]: from='client.16128 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:43.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:42 vm04.local ceph-mon[53345]: pgmap v401: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:24:44.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:43 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:44.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:43 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:44.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:43 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:44.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:43 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:44.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:43 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:44.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:43 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:44.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:43 vm04.local ceph-mon[53345]: pgmap v402: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:24:44.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:43 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:44.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:43 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:44.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:43 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:44.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:43 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:44.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:43 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:44.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:43 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:44.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:43 vm05.local ceph-mon[57841]: pgmap v402: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:24:45.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:24:45.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:24:46.344 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:45 vm05.local ceph-mon[57841]: pgmap v403: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 341 B/s wr, 61 op/s
2026-04-15T14:24:46.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:45 vm04.local ceph-mon[53345]: pgmap v403: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 341 B/s wr, 61 op/s
2026-04-15T14:24:46.707 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5s) 3s ago 10m 97.1M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:24:46.959 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled to stop rgw.foo.vm05.pzlhsk on host 'vm05'
2026-04-15T14:24:47.211 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:24:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:46 vm04.local ceph-mon[53345]: from='client.16144 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:46 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:47.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:46 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:47.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:46 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:47.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:46 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:47.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:46 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:47.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:46 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:47.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:46 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:47.250 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:46 vm05.local ceph-mon[57841]: from='client.16144 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:47.250 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:46 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:47.250 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:46 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:47.250 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:46 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:47.250 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:46 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:24:47.250 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:46 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:24:47.251 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:46 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:47.251 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:46 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:24:47.424 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:24:47.424 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 22s ago 10m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:24:47.424 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (24s) 22s ago 10m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:24:47.424 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5s) 4s ago 10m 97.1M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:24:47.424 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (10m) 4s ago 10m 138M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:24:47.678 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:24:48.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:48 vm04.local ceph-mon[53345]: from='client.16148 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm05.pzlhsk", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:48.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:48 vm04.local ceph-mon[53345]: Schedule stop daemon rgw.foo.vm05.pzlhsk
2026-04-15T14:24:48.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:48 vm04.local ceph-mon[53345]: pgmap v404: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-15T14:24:48.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:48 vm04.local ceph-mon[53345]: from='client.16152 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:48.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:48 vm04.local ceph-mon[53345]: from='client.16156 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:48.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:48 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/795875884' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:48.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:48 vm05.local ceph-mon[57841]: from='client.16148 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm05.pzlhsk", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:48.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:48 vm05.local ceph-mon[57841]: Schedule stop daemon rgw.foo.vm05.pzlhsk
2026-04-15T14:24:48.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:48 vm05.local ceph-mon[57841]: pgmap v404: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-15T14:24:48.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:48 vm05.local ceph-mon[57841]: from='client.16152 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:48.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:48 vm05.local ceph-mon[57841]: from='client.16156 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:48.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:48 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/795875884' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:50.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:50 vm04.local ceph-mon[53345]: pgmap v405: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 170 B/s wr, 109 op/s
2026-04-15T14:24:50.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:50 vm05.local ceph-mon[57841]: pgmap v405: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 170 B/s wr, 109 op/s
2026-04-15T14:24:52.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:52 vm04.local ceph-mon[53345]: pgmap v406: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 170 B/s wr, 109 op/s
2026-04-15T14:24:52.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:52 vm05.local ceph-mon[57841]: pgmap v406: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 170 B/s wr, 109 op/s
2026-04-15T14:24:52.913 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:24:53.132 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:24:53.132 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 27s ago 10m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:24:53.132 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (29s) 27s ago 10m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:24:53.132 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (11s) 10s ago 10m 97.1M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:24:53.132 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (11m) 10s ago 11m 138M - 20.2.0-19-g7ec4401a095 259950fb12cb 5bfb90e1bb45
2026-04-15T14:24:53.418 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:24:54.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:54 vm04.local ceph-mon[53345]: from='client.16164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:54.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:54 vm04.local ceph-mon[53345]: from='client.16168 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:54.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:54 vm04.local ceph-mon[53345]: pgmap v407: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 170 B/s wr, 109 op/s
2026-04-15T14:24:54.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:54 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1273904806' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:54.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:54 vm05.local ceph-mon[57841]: from='client.16164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:54.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:54 vm05.local ceph-mon[57841]: from='client.16168 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:24:54.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:54 vm05.local ceph-mon[57841]: pgmap v407: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 170 B/s wr, 109 op/s
2026-04-15T14:24:54.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:54 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1273904806' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:24:56.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:56 vm04.local ceph-mon[53345]: pgmap v408: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 341 B/s wr, 109 op/s
2026-04-15T14:24:56.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:56 vm05.local ceph-mon[57841]: pgmap v408: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 341 B/s wr, 109 op/s
2026-04-15T14:24:58.403 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:58 vm05.local ceph-mon[57841]: pgmap v409: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 341 B/s wr, 48 op/s
2026-04-15T14:24:58.403 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:58 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:58.403 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:58 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:58.403 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:58 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:58.718 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:58 vm04.local ceph-mon[53345]: pgmap v409: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 341 B/s wr, 48 op/s
2026-04-15T14:24:58.718 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:58 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:58.718 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:58 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:24:58.718 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:58 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:24:58.718 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:24:58.998 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:24:58.999 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 33s ago 11m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:24:58.999 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (35s) 33s ago 11m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:24:58.999 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (17s) 0s ago 11m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:24:58.999 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 0s ago 11m - -
2026-04-15T14:24:59.305 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:25:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:59 vm04.local ceph-mon[53345]: from='client.16176 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:25:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:25:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:25:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:25:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:59 vm04.local ceph-mon[53345]: pgmap v410: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 527 B/s wr, 0 op/s
2026-04-15T14:25:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:25:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:25:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:59 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4289137938' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:24:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:25:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:59 vm05.local ceph-mon[57841]: from='client.16176 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:25:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:25:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:25:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:25:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:59 vm05.local ceph-mon[57841]: pgmap v410: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 527 B/s wr, 0 op/s
2026-04-15T14:25:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:25:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:25:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:59 vm05.local ceph-mon[57841]: from='client.?
192.168.123.104:0/4289137938' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:25:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:24:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:25:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:00 vm04.local ceph-mon[53345]: from='client.16180 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:25:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:00 vm04.local ceph-mon[53345]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-04-15T14:25:01.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:00 vm05.local ceph-mon[57841]: from='client.16180 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:25:01.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:00 vm05.local ceph-mon[57841]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-04-15T14:25:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:01 vm04.local ceph-mon[53345]: pgmap v411: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 175 B/s rd, 351 B/s wr, 0 op/s 2026-04-15T14:25:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:01 vm05.local ceph-mon[57841]: pgmap v411: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 175 B/s rd, 351 B/s wr, 0 op/s 2026-04-15T14:25:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:02 vm05.local ceph-mon[57841]: pgmap v412: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 175 B/s rd, 351 B/s wr, 0 op/s 2026-04-15T14:25:03.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:02 vm04.local ceph-mon[53345]: pgmap v412: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 175 B/s rd, 351 B/s wr, 0 op/s 2026-04-15T14:25:04.522 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:25:04.725 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:25:04.725 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 39s ago 11m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:25:04.725 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (41s) 39s ago 11m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:25:04.725 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (23s) 5s ago 11m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:25:04.725 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 5s ago 11m - - 2026-04-15T14:25:04.996 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:25:04.996 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:25:04.996 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:25:05.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:04 vm05.local ceph-mon[57841]: from='client.16188 -' entity='client.admin' 
cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:25:05.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:04 vm05.local ceph-mon[57841]: from='client.16192 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:25:05.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:04 vm05.local ceph-mon[57841]: pgmap v413: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 175 B/s rd, 351 B/s wr, 0 op/s 2026-04-15T14:25:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:04 vm04.local ceph-mon[53345]: from='client.16188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:25:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:04 vm04.local ceph-mon[53345]: from='client.16192 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:25:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:04 vm04.local ceph-mon[53345]: pgmap v413: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 175 B/s rd, 351 B/s wr, 0 op/s 2026-04-15T14:25:06.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:05 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/862328985' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:25:06.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:05 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/862328985' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:25:07.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:06 vm05.local ceph-mon[57841]: pgmap v414: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s 2026-04-15T14:25:07.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:06 vm04.local ceph-mon[53345]: pgmap v414: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s 2026-04-15T14:25:09.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:09 vm04.local ceph-mon[53345]: pgmap v415: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s 2026-04-15T14:25:09.443 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:09 vm05.local ceph-mon[57841]: pgmap v415: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s 2026-04-15T14:25:10.226 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:25:10.433 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:25:10.433 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 45s ago 11m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:25:10.433 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (47s) 45s ago 11m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:25:10.433 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (28s) 11s ago 11m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:25:10.433 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 11s ago 11m - - 
2026-04-15T14:25:10.683 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:25:10.683 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:25:10.683 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:25:11.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:10 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4083682736' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:11.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:10 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4083682736' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:12.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:11 vm04.local ceph-mon[53345]: from='client.16200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:12.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:11 vm04.local ceph-mon[53345]: from='client.16204 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:12.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:11 vm04.local ceph-mon[53345]: pgmap v416: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:25:12.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:11 vm05.local ceph-mon[57841]: from='client.16200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:12.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:11 vm05.local ceph-mon[57841]: from='client.16204 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:12.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:11 vm05.local ceph-mon[57841]: pgmap v416: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:25:13.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:12 vm05.local ceph-mon[57841]: pgmap v417: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:25:13.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:12 vm04.local ceph-mon[53345]: pgmap v417: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:25:15.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:25:15.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:25:15.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:15 vm04.local ceph-mon[53345]: pgmap v418: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:15.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:25:15.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:25:15.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:15 vm05.local ceph-mon[57841]: pgmap v418: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:15.929 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:25:16.135 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:25:16.135 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (5m) 50s ago 11m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:25:16.135 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (52s) 50s ago 11m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:25:16.135 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (34s) 17s ago 11m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:25:16.135 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 17s ago 11m - -
2026-04-15T14:25:16.400 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:25:16.400 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:25:16.400 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:25:16.760 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:16 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/391049969' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:16.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:16 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/391049969' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:17.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:17 vm05.local ceph-mon[57841]: from='client.16212 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:17.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:17 vm05.local ceph-mon[57841]: from='client.16216 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:17.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:17 vm05.local ceph-mon[57841]: pgmap v419: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:17.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:17 vm04.local ceph-mon[53345]: from='client.16212 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:17.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:17 vm04.local ceph-mon[53345]: from='client.16216 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:17.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:17 vm04.local ceph-mon[53345]: pgmap v419: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:19.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:18 vm05.local ceph-mon[57841]: pgmap v420: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:19.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:18 vm04.local ceph-mon[53345]: pgmap v420: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:21.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:20 vm05.local ceph-mon[57841]: pgmap v421: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:21.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:20 vm04.local ceph-mon[53345]: pgmap v421: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:21.622 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:25:21.806 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:25:21.806 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (6m) 56s ago 11m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:25:21.806 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (58s) 56s ago 11m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:25:21.806 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (40s) 23s ago 11m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:25:21.806 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 23s ago 11m - -
2026-04-15T14:25:22.046 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:25:22.046 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:25:22.046 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:25:22.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:21 vm05.local ceph-mon[57841]: from='client.16224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:22.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:21 vm05.local ceph-mon[57841]: from='client.16228 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:22.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:21 vm04.local ceph-mon[53345]: from='client.16224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:22.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:21 vm04.local ceph-mon[53345]: from='client.16228 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:23.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:22 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/386317077' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:23.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:22 vm05.local ceph-mon[57841]: pgmap v422: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:23.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:22 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/386317077' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:23.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:22 vm04.local ceph-mon[53345]: pgmap v422: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:25.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:24 vm05.local ceph-mon[57841]: pgmap v423: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:25.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:24 vm04.local ceph-mon[53345]: pgmap v423: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:27.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:26 vm05.local ceph-mon[57841]: pgmap v424: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:27.276 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:26 vm04.local ceph-mon[53345]: pgmap v424: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:27.276 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:25:27.478 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:25:27.478 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (6m) 62s ago 11m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:25:27.478 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (64s) 62s ago 11m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:25:27.478 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (45s) 28s ago 11m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:25:27.478 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 28s ago 11m - -
2026-04-15T14:25:27.747 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:25:27.747 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:25:27.747 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:25:28.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:27 vm04.local ceph-mon[53345]: from='client.16236 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:28.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:27 vm04.local ceph-mon[53345]: from='client.16240 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:28.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:27 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3227662769' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:28.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:27 vm05.local ceph-mon[57841]: from='client.16236 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:28.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:27 vm05.local ceph-mon[57841]: from='client.16240 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:28.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:27 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3227662769' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:29.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:28 vm05.local ceph-mon[57841]: pgmap v425: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:29.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:28 vm04.local ceph-mon[53345]: pgmap v425: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:25:30.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:25:31.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:30 vm05.local ceph-mon[57841]: pgmap v426: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:31.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:30 vm04.local ceph-mon[53345]: pgmap v426: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:32.994 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:25:33.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:32 vm05.local ceph-mon[57841]: pgmap v427: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:33.200 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:25:33.200 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (6m) 68s ago 11m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:25:33.200 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (69s) 68s ago 11m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:25:33.200 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (51s) 34s ago 11m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:25:33.200 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 34s ago 11m - -
2026-04-15T14:25:33.201 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:32 vm04.local ceph-mon[53345]: pgmap v427: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:33.462 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:25:33.462 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:25:33.462 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:25:34.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:33 vm05.local ceph-mon[57841]: from='client.16248 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:34.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:33 vm05.local ceph-mon[57841]: from='client.16252 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:34.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:33 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1462638312' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:34.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:33 vm04.local ceph-mon[53345]: from='client.16248 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:34.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:33 vm04.local ceph-mon[53345]: from='client.16252 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:34.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:33 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1462638312' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:35.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:34 vm05.local ceph-mon[57841]: pgmap v428: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:35.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:34 vm04.local ceph-mon[53345]: pgmap v428: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:37.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:36 vm05.local ceph-mon[57841]: pgmap v429: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:37.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:36 vm04.local ceph-mon[53345]: pgmap v429: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:38.675 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:25:38.869 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:25:38.869 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (6m) 73s ago 11m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:25:38.869 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (75s) 73s ago 11m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:25:38.869 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (57s) 40s ago 11m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:25:38.869 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 40s ago 11m - -
2026-04-15T14:25:39.111 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:25:39.111 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:25:39.111 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:25:39.111 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:38 vm04.local ceph-mon[53345]: from='client.16260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:39.111 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:38 vm04.local ceph-mon[53345]: pgmap v430: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:39.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:38 vm05.local ceph-mon[57841]: from='client.16260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:39.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:38 vm05.local ceph-mon[57841]: pgmap v430: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:39 vm04.local ceph-mon[53345]: from='client.16264 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:39 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3219407037' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:40.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:39 vm05.local ceph-mon[57841]: from='client.16264 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:40.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:39 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3219407037' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:41.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:40 vm05.local ceph-mon[57841]: pgmap v431: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:25:41.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:40 vm04.local ceph-mon[53345]: pgmap v431: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:25:43.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:42 vm05.local ceph-mon[57841]: pgmap v432: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:25:43.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:42 vm04.local ceph-mon[53345]: pgmap v432: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:25:44.322 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:25:44.505 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:25:44.505 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (6m) 79s ago 11m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:25:44.505 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (81s) 79s ago 11m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:25:44.505 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (62s) 45s ago 11m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:25:44.505 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 45s ago 11m - -
2026-04-15T14:25:44.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:25:44.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:25:44.756 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:25:44.756 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:25:44.756 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:25:45.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:45 vm05.local ceph-mon[57841]: from='client.16272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:45.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:45 vm05.local ceph-mon[57841]: from='client.16276 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:45.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:45 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3136135996' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:45.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:45 vm05.local ceph-mon[57841]: pgmap v433: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:45.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:45 vm04.local ceph-mon[53345]: from='client.16272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:45.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:45 vm04.local ceph-mon[53345]: from='client.16276 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:45.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:45 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3136135996' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:45.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:45 vm04.local ceph-mon[53345]: pgmap v433: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:47.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:46 vm05.local ceph-mon[57841]: pgmap v434: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:47.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:46 vm04.local ceph-mon[53345]: pgmap v434: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:49.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:48 vm05.local ceph-mon[57841]: pgmap v435: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:49.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:48 vm04.local ceph-mon[53345]: pgmap v435: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:49.981 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:25:50.184 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:25:50.185 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (6m) 85s ago 11m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:25:50.185 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (86s) 85s ago 11m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:25:50.185 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (68s) 51s ago 11m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:25:50.185 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 51s ago 11m - -
2026-04-15T14:25:50.439 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:25:50.439 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:25:50.439 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:25:50.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:50 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3532784724' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:50.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:50 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3532784724' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:51.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:51 vm04.local ceph-mon[53345]: from='client.16284 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:51.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:51 vm04.local ceph-mon[53345]: from='client.16288 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:51.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:51 vm04.local ceph-mon[53345]: pgmap v436: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:51.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:51 vm05.local ceph-mon[57841]: from='client.16284 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:51.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:51 vm05.local ceph-mon[57841]: from='client.16288 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:51.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:51 vm05.local ceph-mon[57841]: pgmap v436: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:53.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:52 vm04.local ceph-mon[53345]: pgmap v437: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:53.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:52 vm05.local ceph-mon[57841]: pgmap v437: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:54 vm05.local ceph-mon[57841]: pgmap v438: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:55.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:54 vm04.local ceph-mon[53345]: pgmap v438: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:55.675 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:25:55.869 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:25:55.869 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (6m) 90s ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:25:55.869 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (92s) 90s ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:25:55.870 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (74s) 57s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:25:55.870 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 57s ago 12m - -
2026-04-15T14:25:56.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:55 vm04.local ceph-mon[53345]: from='client.16296 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:56.120 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:25:56.120 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:25:56.120 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:25:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:55 vm05.local ceph-mon[57841]: from='client.16296 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:57.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:56 vm05.local ceph-mon[57841]: from='client.16300 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:57.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:56 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2855472356' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:57.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:56 vm05.local ceph-mon[57841]: pgmap v439: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:57.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:56 vm04.local ceph-mon[53345]: from='client.16300 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:25:57.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:56 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2855472356' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:25:57.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:56 vm04.local ceph-mon[53345]: pgmap v439: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:25:59.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:58 vm05.local ceph-mon[57841]: pgmap v440: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:59.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:58 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:25:59.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:58 vm04.local ceph-mon[53345]: pgmap v440: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:25:59.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:58 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:26:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:26:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:26:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:59 vm04.local ceph-mon[53345]: pgmap v441: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 397 B/s wr, 0 op/s
2026-04-15T14:26:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:59 vm04.local ceph-mon[53345]: pgmap v442: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 246 B/s rd, 492 B/s wr, 0 op/s
2026-04-15T14:26:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:26:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:26:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:25:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:26:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:26:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:26:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:59 vm05.local ceph-mon[57841]: pgmap v441: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 397 B/s wr, 0 op/s
2026-04-15T14:26:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:59 vm05.local ceph-mon[57841]: pgmap v442: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 246 B/s rd, 492 B/s wr, 0 op/s
2026-04-15T14:26:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:26:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:26:00.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:25:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:26:01.329 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:01.529 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:01.529 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (6m) 96s ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:01.529 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (98s) 96s ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:01.529 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (80s) 62s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:01.529 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 62s ago 12m - -
2026-04-15T14:26:01.773 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:01.773 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:01.773 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:02.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:02 vm04.local ceph-mon[53345]: pgmap v443: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 246 B/s rd, 492 B/s wr, 0 op/s
2026-04-15T14:26:02.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:02 vm04.local ceph-mon[53345]: from='client.16308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:02.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:02 vm04.local ceph-mon[53345]: from='client.16312 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:02.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:02 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/235212435' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:02.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:02 vm05.local ceph-mon[57841]: pgmap v443: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 246 B/s rd, 492 B/s wr, 0 op/s
2026-04-15T14:26:02.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:02 vm05.local ceph-mon[57841]: from='client.16308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:02.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:02 vm05.local ceph-mon[57841]: from='client.16312 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:02.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:02 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/235212435' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:04.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:04 vm04.local ceph-mon[53345]: pgmap v444: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s
2026-04-15T14:26:04.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:04 vm05.local ceph-mon[57841]: pgmap v444: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s
2026-04-15T14:26:06.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:06 vm04.local ceph-mon[53345]: pgmap v445: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s
2026-04-15T14:26:06.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:06 vm05.local ceph-mon[57841]: pgmap v445: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s
2026-04-15T14:26:06.991 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:07.221 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:07.221 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (6m) 102s ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:07.221 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (104s) 102s ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:07.221 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (85s) 68s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:07.221 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 68s ago 12m - -
2026-04-15T14:26:07.470 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:07.470 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:07.470 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:08.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:08 vm04.local ceph-mon[53345]: from='client.16320 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:08.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:08 vm04.local ceph-mon[53345]: pgmap v446: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:08.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:08 vm04.local ceph-mon[53345]: from='client.16324 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:08.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:08 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1690194650' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:08.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:08 vm05.local ceph-mon[57841]: from='client.16320 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:08.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:08 vm05.local ceph-mon[57841]: pgmap v446: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:08.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:08 vm05.local ceph-mon[57841]: from='client.16324 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:08.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:08 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1690194650' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:10.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:10 vm04.local ceph-mon[53345]: pgmap v447: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:10.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:10 vm05.local ceph-mon[57841]: pgmap v447: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:12.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:12 vm04.local ceph-mon[53345]: pgmap v448: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:12.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:12 vm05.local ceph-mon[57841]: pgmap v448: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:12.721 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:12.925 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:12.925 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (6m) 107s ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:12.925 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (109s) 107s ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:12.925 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (91s) 74s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:12.925 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 74s ago 12m - -
2026-04-15T14:26:13.170 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:13.170 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:13.170 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:13.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:13 vm04.local ceph-mon[53345]: from='client.16332 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:13.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:13 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2766017351' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:13.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:13 vm05.local ceph-mon[57841]: from='client.16332 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:13.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:13 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2766017351' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:14.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:14 vm04.local ceph-mon[53345]: from='client.16336 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:14.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:14 vm04.local ceph-mon[53345]: pgmap v449: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:14.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:14 vm05.local ceph-mon[57841]: from='client.16336 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:14.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:14 vm05.local ceph-mon[57841]: pgmap v449: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:15.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:26:15.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:26:16.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:16 vm04.local ceph-mon[53345]: pgmap v450: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:16.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:16 vm05.local ceph-mon[57841]: pgmap v450: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:18.410 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:18.610 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:18.610 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 113s ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:18.610 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (115s) 113s ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:18.610 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (97s) 79s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:18.610 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 79s ago 12m - -
2026-04-15T14:26:18.610 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:18 vm04.local ceph-mon[53345]: pgmap v451: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:18.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:18 vm05.local ceph-mon[57841]: pgmap v451: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:18.867 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:18.867 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:18.867 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:19.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:19 vm04.local ceph-mon[53345]: from='client.16344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:19.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:19 vm04.local ceph-mon[53345]: from='client.16348 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:19.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:19 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/821857523' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:19.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:19 vm05.local ceph-mon[57841]: from='client.16344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:19.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:19 vm05.local ceph-mon[57841]: from='client.16348 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:19.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:19 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/821857523' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:20.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:20 vm04.local ceph-mon[53345]: pgmap v452: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:20.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:20 vm05.local ceph-mon[57841]: pgmap v452: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:22.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:22 vm04.local ceph-mon[53345]: pgmap v453: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:22.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:22 vm05.local ceph-mon[57841]: pgmap v453: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:24.072 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:24.252 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:24.252 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 119s ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:24.252 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 119s ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:24.252 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (102s) 85s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:24.252 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 85s ago 12m - -
2026-04-15T14:26:24.489 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:24.489 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:24.489 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:24.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:24 vm04.local ceph-mon[53345]: pgmap v454: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:24.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:24 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1939817322' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:24.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:24 vm05.local ceph-mon[57841]: pgmap v454: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:24.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:24 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1939817322' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:25.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:25 vm04.local ceph-mon[53345]: from='client.16356 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:25.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:25 vm04.local ceph-mon[53345]: from='client.16360 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:25.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:25 vm05.local ceph-mon[57841]: from='client.16356 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:25.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:25 vm05.local ceph-mon[57841]: from='client.16360 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:26.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:26 vm04.local ceph-mon[53345]: pgmap v455: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:26.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:26 vm05.local ceph-mon[57841]: pgmap v455: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:28.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:28 vm04.local ceph-mon[53345]: pgmap v456: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:28.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:28 vm05.local ceph-mon[57841]: pgmap v456: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:29.712 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:29.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:26:29.902 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:29.902 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 2m ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:29.902 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 2m ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:29.902 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (108s) 91s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:29.902 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 91s ago 12m - -
2026-04-15T14:26:29.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:26:30.140 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:30.140 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:30.140 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:30.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:30 vm04.local ceph-mon[53345]: pgmap v457: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:30.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:30 vm04.local ceph-mon[53345]: from='client.16368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:30.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:30 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1237776886' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:30.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:30 vm05.local ceph-mon[57841]: pgmap v457: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:30.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:30 vm05.local ceph-mon[57841]: from='client.16368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:30.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:30 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1237776886' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:31.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:31 vm04.local ceph-mon[53345]: from='client.16372 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:31.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:31 vm05.local ceph-mon[57841]: from='client.16372 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:32.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:32 vm04.local ceph-mon[53345]: pgmap v458: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:32.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:32 vm05.local ceph-mon[57841]: pgmap v458: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:34.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:34 vm04.local ceph-mon[53345]: pgmap v459: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:34.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:34 vm05.local ceph-mon[57841]: pgmap v459: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:35.380 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:35.586 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:35.586 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 2m ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:35.586 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 2m ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:35.587 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (114s) 96s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:35.587 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 96s ago 12m - -
2026-04-15T14:26:35.837 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:35.837 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:35.837 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:37.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:36 vm04.local ceph-mon[53345]: pgmap v460: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:37.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:36 vm04.local ceph-mon[53345]: from='client.16380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:37.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:36 vm04.local ceph-mon[53345]: from='client.16384 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:37.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:36 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1767582536' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:37.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:36 vm05.local ceph-mon[57841]: pgmap v460: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:37.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:36 vm05.local ceph-mon[57841]: from='client.16380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:37.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:36 vm05.local ceph-mon[57841]: from='client.16384 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:37.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:36 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1767582536' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:39.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:38 vm04.local ceph-mon[53345]: pgmap v461: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:39.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:38 vm05.local ceph-mon[57841]: pgmap v461: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:41.047 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:41.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:40 vm04.local ceph-mon[53345]: pgmap v462: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:41.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:40 vm05.local ceph-mon[57841]: pgmap v462: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:41.230 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:41.230 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 2m ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:41.230 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 2m ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:41.230 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (119s) 102s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:41.230 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 102s ago 12m - -
2026-04-15T14:26:41.472 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:41.472 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:41.472 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:42.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:41 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3526858073' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:42.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:41 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3526858073' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:43.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:42 vm04.local ceph-mon[53345]: from='client.16392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:43.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:42 vm04.local ceph-mon[53345]: pgmap v463: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:43.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:42 vm04.local ceph-mon[53345]: from='client.16396 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:43.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:42 vm05.local ceph-mon[57841]: from='client.16392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:43.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:42 vm05.local ceph-mon[57841]: pgmap v463: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:43.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:42 vm05.local ceph-mon[57841]: from='client.16396 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:45.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:44 vm04.local ceph-mon[53345]: pgmap v464: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:45.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:26:45.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:44 vm05.local ceph-mon[57841]: pgmap v464: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:26:45.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:26:46.691 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:46.878 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:46.878 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 2m ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:46.878 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 2m ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:46.878 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 108s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:46.878 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 108s ago 12m - -
2026-04-15T14:26:47.106 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:47.106 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:47.106 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:47.106 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:46 vm04.local ceph-mon[53345]: pgmap v465: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:47.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:46 vm05.local ceph-mon[57841]: pgmap v465: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:48.094 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:47 vm05.local ceph-mon[57841]: from='client.16404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:48.094 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:47 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2212437794' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:48.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:47 vm04.local ceph-mon[53345]: from='client.16404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:48.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:47 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2212437794' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:49.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:48 vm04.local ceph-mon[53345]: from='client.25599 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:48 vm04.local ceph-mon[53345]: pgmap v466: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:49.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:48 vm05.local ceph-mon[57841]: from='client.25599 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:49.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:48 vm05.local ceph-mon[57841]: pgmap v466: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:51.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:50 vm04.local ceph-mon[53345]: pgmap v467: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:51.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:50 vm05.local ceph-mon[57841]: pgmap v467: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:52.323 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:52.519 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:52.519 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 2m ago 12m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:52.519 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 2m ago 12m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:52.519 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 113s ago 12m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:52.519 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 113s ago 12m - -
2026-04-15T14:26:52.766 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:52.766 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:52.766 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:53.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:52 vm04.local ceph-mon[53345]: pgmap v468: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:53.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:52 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/706296962' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:53.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:52 vm05.local ceph-mon[57841]: pgmap v468: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:53.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:52 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/706296962' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:53 vm05.local ceph-mon[57841]: from='client.16416 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:53 vm05.local ceph-mon[57841]: from='client.16420 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:54.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:53 vm04.local ceph-mon[53345]: from='client.16416 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:54.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:53 vm04.local ceph-mon[53345]: from='client.16420 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:55.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:54 vm04.local ceph-mon[53345]: pgmap v469: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:55.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:55 vm05.local ceph-mon[57841]: pgmap v469: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:56.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:55 vm04.local ceph-mon[53345]: pgmap v470: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:56.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:56 vm05.local ceph-mon[57841]: pgmap v470: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:26:57.980 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:26:58.159 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:26:58.159 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 2m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:26:58.159 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 2m ago 13m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:26:58.159 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 119s ago 13m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:26:58.159 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 119s ago 13m - -
2026-04-15T14:26:58.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:58 vm04.local ceph-mon[53345]: pgmap v471: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:58.393 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:26:58.394 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:26:58.394 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:26:58.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:58 vm05.local ceph-mon[57841]: pgmap v471: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:26:59.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:59 vm04.local ceph-mon[53345]: from='client.16428 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:59.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:59 vm04.local ceph-mon[53345]: from='client.16432 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:59.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:26:59 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/503436139' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:26:59.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:59 vm05.local ceph-mon[57841]: from='client.16428 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:59.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:59 vm05.local ceph-mon[57841]: from='client.16432 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:26:59.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:26:59 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/503436139' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:00 vm04.local ceph-mon[53345]: pgmap v472: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:27:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:27:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:27:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:27:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:27:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:00 vm04.local ceph-mon[53345]: pgmap v473: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s
2026-04-15T14:27:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:27:00.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:27:00.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:00 vm05.local ceph-mon[57841]: pgmap v472: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:27:00.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:27:00.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:27:00.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:27:00.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:27:00.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:00 vm05.local ceph-mon[57841]: pgmap v473: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s
2026-04-15T14:27:00.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:27:00.693 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:27:02.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:02 vm04.local ceph-mon[53345]: pgmap v474: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s
2026-04-15T14:27:02.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:02 vm05.local ceph-mon[57841]: pgmap v474: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s
2026-04-15T14:27:03.613 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:27:03.798 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:27:03.798 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 2m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:27:03.798 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 2m ago 13m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:27:03.798 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 2m ago 13m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:27:03.798 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 2m ago 13m - -
2026-04-15T14:27:04.041 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:27:04.042 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:27:04.042 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:27:04.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:04 vm04.local ceph-mon[53345]: pgmap v475: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s
2026-04-15T14:27:04.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:04 vm04.local ceph-mon[53345]: from='client.16440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:04.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:04 vm04.local ceph-mon[53345]: from='client.16444 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:04.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:04 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3275834652' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:04.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:04 vm05.local ceph-mon[57841]: pgmap v475: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s
2026-04-15T14:27:04.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:04 vm05.local ceph-mon[57841]: from='client.16440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:04.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:04 vm05.local ceph-mon[57841]: from='client.16444 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:04.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:04 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3275834652' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:06.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:06 vm04.local ceph-mon[53345]: pgmap v476: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s
2026-04-15T14:27:06.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:06 vm05.local ceph-mon[57841]: pgmap v476: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s
2026-04-15T14:27:08.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:08 vm04.local ceph-mon[53345]: pgmap v477: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s
2026-04-15T14:27:08.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:08 vm05.local ceph-mon[57841]: pgmap v477: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s
2026-04-15T14:27:09.255 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:27:09.453 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:27:09.454 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 2m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:27:09.454 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 2m ago 13m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:27:09.454 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 2m ago 13m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:27:09.454 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 2m ago 13m - -
2026-04-15T14:27:09.700 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:27:09.700 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:27:09.700 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:27:10.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:10 vm04.local ceph-mon[53345]: from='client.16452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:10.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:10 vm04.local ceph-mon[53345]: from='client.16456 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:10.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:10 vm04.local ceph-mon[53345]: pgmap v478: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:27:10.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:10 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3612984130' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:10.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:10 vm05.local ceph-mon[57841]: from='client.16452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:10.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:10 vm05.local ceph-mon[57841]: from='client.16456 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:10.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:10 vm05.local ceph-mon[57841]: pgmap v478: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:27:10.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:10 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3612984130' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:12.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:12 vm04.local ceph-mon[53345]: pgmap v479: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:27:12.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:12 vm05.local ceph-mon[57841]: pgmap v479: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail
2026-04-15T14:27:14.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:14 vm04.local ceph-mon[53345]: pgmap v480: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:14.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:27:14.906 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:27:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:14 vm05.local ceph-mon[57841]: pgmap v480: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:14.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:27:15.093 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:27:15.093 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (7m) 2m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:27:15.093 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 2m ago 13m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:27:15.093 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 2m ago 13m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:27:15.093 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 2m ago 13m - -
2026-04-15T14:27:15.324 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:27:15.324 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:27:15.324 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:27:16.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:16 vm04.local ceph-mon[53345]: from='client.16464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:16.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:16 vm04.local ceph-mon[53345]: from='client.16468 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:16.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:16 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3431612273' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:16.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:16 vm04.local ceph-mon[53345]: pgmap v481: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:16.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:16 vm05.local ceph-mon[57841]: from='client.16464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:16.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:16 vm05.local ceph-mon[57841]: from='client.16468 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:16.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:16 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3431612273' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:16.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:16 vm05.local ceph-mon[57841]: pgmap v481: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:18.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:18 vm04.local ceph-mon[53345]: pgmap v482: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:18.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:18 vm05.local ceph-mon[57841]: pgmap v482: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:20.537 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:27:20.722 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:27:20.722 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 2m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:27:20.722 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (2m) 2m ago 13m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:27:20.722 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 2m ago 13m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:27:20.722 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 2m ago 13m - -
2026-04-15T14:27:20.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:20 vm04.local ceph-mon[53345]: pgmap v483: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:20 vm05.local ceph-mon[57841]: pgmap v483: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:20.961 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:27:20.961 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:27:20.961 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:27:21.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:21 vm04.local ceph-mon[53345]: from='client.16476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:21.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:21 vm04.local ceph-mon[53345]: from='client.16480 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:21.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:21 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/305712196' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:21.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:21 vm05.local ceph-mon[57841]: from='client.16476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:21.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:21 vm05.local ceph-mon[57841]: from='client.16480 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:21.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:21 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/305712196' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:22.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:22 vm04.local ceph-mon[53345]: pgmap v484: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:22.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:22 vm05.local ceph-mon[57841]: pgmap v484: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:24.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:24 vm04.local ceph-mon[53345]: pgmap v485: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:27:24.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:24 vm05.local ceph-mon[57841]: pgmap v485: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:27:26.181 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:27:26.380 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:27:26.381 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 3m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:27:26.381 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 13m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:27:26.381 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 2m ago 13m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:27:26.381 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 2m ago 13m - -
2026-04-15T14:27:26.582 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:26 vm04.local ceph-mon[53345]: pgmap v486: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:26.622 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:27:26.622 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:27:26.622 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:27:26.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:26 vm05.local ceph-mon[57841]: pgmap v486: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:27.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:27 vm04.local ceph-mon[53345]: from='client.16488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:27.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:27 vm04.local ceph-mon[53345]: from='client.16492 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:27.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:27 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3067201866' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:27.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:27 vm05.local ceph-mon[57841]: from='client.16488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:27.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:27 vm05.local ceph-mon[57841]: from='client.16492 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:27.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:27 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3067201866' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:28.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:28 vm04.local ceph-mon[53345]: pgmap v487: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:28.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:28 vm05.local ceph-mon[57841]: pgmap v487: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:29.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:27:29.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:27:30.867 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:30 vm04.local ceph-mon[53345]: pgmap v488: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:27:30.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:30 vm05.local ceph-mon[57841]: pgmap v488: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:27:31.839 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:27:32.023 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:27:32.023 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 3m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d
2026-04-15T14:27:32.023 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 13m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e
2026-04-15T14:27:32.023 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 2m ago 13m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a
2026-04-15T14:27:32.024 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 2m ago 13m - -
2026-04-15T14:27:32.254 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:27:32.254 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:27:32.254 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:27:32.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:32 vm05.local ceph-mon[57841]: pgmap v489: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:27:32.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:32 vm05.local ceph-mon[57841]: from='client.16500 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:32.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:32 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3050373141' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:33.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:32 vm04.local ceph-mon[53345]: pgmap v489: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:27:33.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:32 vm04.local ceph-mon[53345]: from='client.16500 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:33.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:32 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3050373141' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:27:33.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:33 vm05.local ceph-mon[57841]: from='client.16504 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:34.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:33 vm04.local ceph-mon[53345]: from='client.16504 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:27:34.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:34 vm05.local ceph-mon[57841]: pgmap v490: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:27:35.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:34 vm04.local ceph-mon[53345]: pgmap v490: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:27:36.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:36 vm05.local ceph-mon[57841]: pgmap v491: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:37.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:36 vm04.local ceph-mon[53345]: pgmap v491: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T14:27:37.471 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop
2026-04-15T14:27:37.655 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:27:37.655
INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 3m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:27:37.655 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 13m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:27:37.655 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (2m) 2m ago 13m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:27:37.655 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 2m ago 13m - - 2026-04-15T14:27:37.893 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:27:37.893 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:27:37.893 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:27:38.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:38 vm05.local ceph-mon[57841]: from='client.16512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:38.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:38 vm05.local ceph-mon[57841]: pgmap v492: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:38.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:38 vm05.local ceph-mon[57841]: from='client.16516 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:38.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:38 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4281551954' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:27:39.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:38 vm04.local ceph-mon[53345]: from='client.16512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:39.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:38 vm04.local ceph-mon[53345]: pgmap v492: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:39.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:38 vm04.local ceph-mon[53345]: from='client.16516 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:39.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:38 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/4281551954' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:27:40.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:40 vm05.local ceph-mon[57841]: pgmap v493: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:41.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:40 vm04.local ceph-mon[53345]: pgmap v493: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:43.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:42 vm04.local ceph-mon[53345]: pgmap v494: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:27:43.121 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:27:43.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:42 vm05.local ceph-mon[57841]: pgmap v494: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:27:43.319 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:27:43.320 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 3m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:27:43.320 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 13m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:27:43.320 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 2m ago 13m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:27:43.320 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 2m ago 13m - - 2026-04-15T14:27:43.567 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:27:43.567 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:27:43.567 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:27:43.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:43 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3411755580' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:27:44.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:43 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/3411755580' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:27:45.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:44 vm04.local ceph-mon[53345]: from='client.16524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:45.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:44 vm04.local ceph-mon[53345]: from='client.16528 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:45.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:44 vm04.local ceph-mon[53345]: pgmap v495: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:45.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:27:45.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:44 vm05.local ceph-mon[57841]: from='client.16524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:45.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:44 vm05.local ceph-mon[57841]: from='client.16528 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:45.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:44 vm05.local ceph-mon[57841]: pgmap v495: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:45.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:27:47.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:46 vm04.local ceph-mon[53345]: pgmap v496: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:47.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:46 vm05.local ceph-mon[57841]: pgmap v496: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:48.770 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:27:48.948 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:27:48.948 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 3m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:27:48.948 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 13m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:27:48.948 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 2m ago 13m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:27:48.948 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 2m ago 13m - - 2026-04-15T14:27:48.994 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:48 vm05.local ceph-mon[57841]: pgmap v497: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB 
avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:48 vm04.local ceph-mon[53345]: pgmap v497: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:49.187 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:27:49.187 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:27:49.187 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:27:50.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:49 vm04.local ceph-mon[53345]: from='client.16536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:50.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:49 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3785550342' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:27:50.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:49 vm05.local ceph-mon[57841]: from='client.16536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:50.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:49 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3785550342' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:27:51.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:50 vm04.local ceph-mon[53345]: from='client.16540 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:51.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:50 vm04.local ceph-mon[53345]: pgmap v498: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:51.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:50 vm05.local ceph-mon[57841]: from='client.16540 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:51.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:50 vm05.local ceph-mon[57841]: pgmap v498: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:53.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:52 vm04.local ceph-mon[53345]: pgmap v499: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:53.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:52 vm05.local ceph-mon[57841]: pgmap v499: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:54.390 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:27:54.571 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:27:54.571 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 3m ago 13m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:27:54.571 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 
2026-04-15T14:27:54.571 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 2m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:27:54.571 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 2m ago 14m - - 2026-04-15T14:27:54.743 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:54 vm04.local ceph-mon[53345]: pgmap v500: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:27:54.802 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:27:54.802 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:27:54.802 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:27:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:54 vm05.local ceph-mon[57841]: pgmap v500: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:27:56.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:55 vm04.local ceph-mon[53345]: from='client.16548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:56.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:55 vm04.local ceph-mon[53345]: from='client.16552 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:56.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:55 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3803909734' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:27:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:55 vm05.local ceph-mon[57841]: from='client.16548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:55 vm05.local ceph-mon[57841]: from='client.16552 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:27:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:55 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/3803909734' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:27:57.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:56 vm04.local ceph-mon[53345]: pgmap v501: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:57.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:56 vm05.local ceph-mon[57841]: pgmap v501: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:59.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:58 vm04.local ceph-mon[53345]: pgmap v502: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:27:59.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:58 vm05.local ceph-mon[57841]: pgmap v502: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:00.021 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:28:00.021 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:27:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:28:00.021 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:28:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:27:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:28:00.200 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:00.200 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 3m ago 14m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:00.200 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:00.200 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 3m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:28:00.200 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 14m - - 2026-04-15T14:28:00.427 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:00.427 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:00.427 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:28:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:00 vm04.local ceph-mon[53345]: pgmap v503: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:01.118 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:28:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:28:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:00 vm04.local ceph-mon[53345]: pgmap v504: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 199 B/s rd, 398 B/s wr, 0 op/s 2026-04-15T14:28:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:00 vm04.local ceph-mon[53345]: pgmap v505: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 247 B/s rd, 494 B/s wr, 0 op/s 2026-04-15T14:28:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:28:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:28:01.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:00 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2038328667' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:00 vm05.local ceph-mon[57841]: pgmap v503: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:28:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:28:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:00 vm05.local ceph-mon[57841]: pgmap v504: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 199 B/s rd, 398 B/s wr, 0 op/s 2026-04-15T14:28:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:00 vm05.local ceph-mon[57841]: pgmap v505: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 247 B/s rd, 494 B/s wr, 0 op/s 2026-04-15T14:28:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:28:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:28:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:00 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/2038328667' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:02.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:01 vm04.local ceph-mon[53345]: from='client.16560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:01 vm04.local ceph-mon[53345]: from='client.16564 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:01 vm05.local ceph-mon[57841]: from='client.16560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:01 vm05.local ceph-mon[57841]: from='client.16564 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:03.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:02 vm04.local ceph-mon[53345]: pgmap v506: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:28:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:02 vm05.local ceph-mon[57841]: pgmap v506: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:28:05.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:04 vm04.local ceph-mon[53345]: pgmap v507: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:28:05.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:04 vm05.local ceph-mon[57841]: pgmap v507: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:28:05.633 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:05.815 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:05.815 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 3m ago 14m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:05.815 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:05.815 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 3m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:28:05.815 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 14m - - 2026-04-15T14:28:06.046 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:06.046 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:06.046 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:28:07.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:06 vm04.local ceph-mon[53345]: from='client.16572 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:07.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:06 vm04.local ceph-mon[53345]: pgmap 
v508: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:28:07.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:06 vm04.local ceph-mon[53345]: from='client.25719 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:07.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:06 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3673825061' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:07.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:06 vm05.local ceph-mon[57841]: from='client.16572 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:07.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:06 vm05.local ceph-mon[57841]: pgmap v508: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:28:07.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:06 vm05.local ceph-mon[57841]: from='client.25719 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:07.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:06 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3673825061' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:09.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:08 vm04.local ceph-mon[53345]: pgmap v509: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:28:09.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:08 vm05.local ceph-mon[57841]: pgmap v509: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:28:11.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:10 vm04.local ceph-mon[53345]: pgmap v510: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:28:11.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:10 vm05.local ceph-mon[57841]: pgmap v510: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:28:11.250 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:11.427 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:11.427 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 3m ago 14m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:11.427 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:11.427 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 3m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:28:11.427 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 14m - - 2026-04-15T14:28:11.652 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:11.652 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:11.653 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on 
vm05 is in error state 2026-04-15T14:28:12.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:11 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2953031501' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:12.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:11 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2953031501' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:13.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:12 vm04.local ceph-mon[53345]: from='client.16584 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:13.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:12 vm04.local ceph-mon[53345]: from='client.16588 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:13.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:12 vm04.local ceph-mon[53345]: pgmap v511: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:28:13.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:12 vm05.local ceph-mon[57841]: from='client.16584 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:13.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:12 vm05.local ceph-mon[57841]: from='client.16588 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:13.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:12 vm05.local ceph-mon[57841]: pgmap v511: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:28:15.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:14 vm05.local ceph-mon[57841]: pgmap v512: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:15.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:28:15.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:14 vm04.local ceph-mon[53345]: pgmap v512: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:15.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:28:16.856 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:17.036 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:17.036 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (8m) 3m ago 14m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:17.036 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:17.036 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 3m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 
2026-04-15T14:28:17.036 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 14m - - 2026-04-15T14:28:17.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:16 vm04.local ceph-mon[53345]: pgmap v513: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:17.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:16 vm05.local ceph-mon[57841]: pgmap v513: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:17.254 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:17.254 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:17.254 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:28:18.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:17 vm05.local ceph-mon[57841]: from='client.16596 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:18.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:17 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2197585340' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:18.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:17 vm04.local ceph-mon[53345]: from='client.16596 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:18.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:17 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2197585340' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:19.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:18 vm05.local ceph-mon[57841]: from='client.16600 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:19.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:18 vm05.local ceph-mon[57841]: pgmap v514: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:19.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:18 vm04.local ceph-mon[53345]: from='client.16600 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:19.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:18 vm04.local ceph-mon[53345]: pgmap v514: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:20.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:19 vm04.local ceph-mon[53345]: pgmap v515: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:20.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:19 vm05.local ceph-mon[57841]: pgmap v515: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:22.464 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:22.637 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:22.637 
INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9m) 3m ago 14m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:22.637 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (3m) 3m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:22.637 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 3m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:28:22.637 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 14m - - 2026-04-15T14:28:22.847 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:22 vm04.local ceph-mon[53345]: pgmap v516: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:22.859 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:22.859 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:22.859 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:28:23.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:22 vm05.local ceph-mon[57841]: pgmap v516: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:24.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:23 vm04.local ceph-mon[53345]: from='client.16608 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:24.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:23 vm04.local ceph-mon[53345]: from='client.16612 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:24.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:23 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3488015818' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:23 vm05.local ceph-mon[57841]: from='client.16608 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:23 vm05.local ceph-mon[57841]: from='client.16612 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:24.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:23 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/3488015818' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:25.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:24 vm04.local ceph-mon[53345]: pgmap v517: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:25.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:24 vm05.local ceph-mon[57841]: pgmap v517: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:27.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:26 vm04.local ceph-mon[53345]: pgmap v518: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:27.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:26 vm05.local ceph-mon[57841]: pgmap v518: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:28.062 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:28.245 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:28.245 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9m) 4m ago 14m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:28.245 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:28.245 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 3m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:28:28.245 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 14m - - 2026-04-15T14:28:28.496 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:28.496 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:28.496 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:28:29.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:28 vm05.local ceph-mon[57841]: pgmap v519: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:29.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:28 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1829028543' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:29.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:28 vm04.local ceph-mon[53345]: pgmap v519: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:29.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:28 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/1829028543' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:29 vm04.local ceph-mon[53345]: from='client.16620 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:29 vm04.local ceph-mon[53345]: from='client.16624 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:30.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:29 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:28:30.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:29 vm05.local ceph-mon[57841]: from='client.16620 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:30.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:29 vm05.local ceph-mon[57841]: from='client.16624 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:30.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:29 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:28:31.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:30 vm05.local ceph-mon[57841]: pgmap v520: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:31.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:30 vm04.local ceph-mon[53345]: pgmap v520: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:33.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:32 vm05.local ceph-mon[57841]: pgmap v521: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:33.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:32 vm04.local ceph-mon[53345]: pgmap v521: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:33.710 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:33.903 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:33.903 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9m) 4m ago 14m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:33.903 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:33.903 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 3m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:28:33.904 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 14m - - 2026-04-15T14:28:34.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:33 vm04.local ceph-mon[53345]: from='client.16632 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": 
["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:34.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:33 vm04.local ceph-mon[53345]: pgmap v522: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:34.136 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:34.136 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:34.136 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:28:34.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:33 vm05.local ceph-mon[57841]: from='client.16632 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:34.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:33 vm05.local ceph-mon[57841]: pgmap v522: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:35.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:34 vm05.local ceph-mon[57841]: from='client.16636 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:35.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:34 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/4105119513' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:35.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:34 vm04.local ceph-mon[53345]: from='client.16636 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:35.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:34 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/4105119513' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:36.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:35 vm04.local ceph-mon[53345]: pgmap v523: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:36.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:35 vm05.local ceph-mon[57841]: pgmap v523: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:39.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:38 vm04.local ceph-mon[53345]: pgmap v524: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:39.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:38 vm05.local ceph-mon[57841]: pgmap v524: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:39.345 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:39.542 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:39.543 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9m) 4m ago 14m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:39.543 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:39.543 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (3m) 3m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:28:39.543 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 14m - - 2026-04-15T14:28:39.776 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:39.776 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:39.776 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:28:40.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:39 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1956175330' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:40.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:39 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/1956175330' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:40 vm04.local ceph-mon[53345]: from='client.16644 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:40 vm04.local ceph-mon[53345]: from='client.16648 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:41.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:40 vm04.local ceph-mon[53345]: pgmap v525: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:41.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:40 vm05.local ceph-mon[57841]: from='client.16644 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:41.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:40 vm05.local ceph-mon[57841]: from='client.16648 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:41.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:40 vm05.local ceph-mon[57841]: pgmap v525: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:42.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:41 vm04.local ceph-mon[53345]: pgmap v526: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:28:42.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:41 vm05.local ceph-mon[57841]: pgmap v526: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:28:44.983 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:45.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:44 vm04.local ceph-mon[53345]: pgmap v527: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:45.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:44 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:28:45.168 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:45.168 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9m) 4m ago 14m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:45.168 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:45.168 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 3m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:28:45.168 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 14m - - 2026-04-15T14:28:45.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:44 vm05.local ceph-mon[57841]: pgmap v527: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:45.192 
INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:44 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:28:45.404 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:45.404 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:45.404 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:28:46.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:45 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1970494132' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:46.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:45 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1970494132' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:47.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:46 vm05.local ceph-mon[57841]: from='client.16656 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:47.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:46 vm05.local ceph-mon[57841]: from='client.16660 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:47.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:46 vm05.local ceph-mon[57841]: pgmap v528: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:47.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:46 vm04.local ceph-mon[53345]: from='client.16656 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:47.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:46 vm04.local ceph-mon[53345]: from='client.16660 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:47.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:46 vm04.local ceph-mon[53345]: pgmap v528: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:49.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:48 vm05.local ceph-mon[57841]: pgmap v529: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:49.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:48 vm04.local ceph-mon[53345]: pgmap v529: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:49 vm04.local ceph-mon[53345]: pgmap v530: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:50.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:49 vm05.local ceph-mon[57841]: pgmap v530: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:50.608 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:50.792 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE 
MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:50.792 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9m) 4m ago 14m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:50.792 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4m ago 14m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:50.792 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 3m ago 14m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:28:50.792 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 14m - - 2026-04-15T14:28:51.027 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:51.027 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:51.027 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:28:51.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:50 vm04.local ceph-mon[53345]: from='client.25787 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:51.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:50 vm04.local ceph-mon[53345]: from='client.25791 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:51.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:50 vm05.local ceph-mon[57841]: from='client.25787 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:51.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:50 vm05.local ceph-mon[57841]: from='client.25791 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:52.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:51 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1551405248' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:52.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:51 vm04.local ceph-mon[53345]: pgmap v531: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:52.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:51 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/1551405248' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:52.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:51 vm05.local ceph-mon[57841]: pgmap v531: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:55.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:54 vm04.local ceph-mon[53345]: pgmap v532: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:54 vm05.local ceph-mon[57841]: pgmap v532: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:28:56.233 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:28:56.412 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:28:56.412 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9m) 4m ago 15m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:28:56.412 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4m ago 15m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:28:56.412 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 3m ago 15m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:28:56.412 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 3m ago 15m - - 2026-04-15T14:28:56.636 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:28:56.636 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:28:56.636 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:28:57.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:56 vm04.local ceph-mon[53345]: pgmap v533: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:57.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:56 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/4247811867' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:57.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:56 vm05.local ceph-mon[57841]: pgmap v533: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:57.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:56 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/4247811867' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:28:58.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:57 vm04.local ceph-mon[53345]: from='client.16680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:58.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:57 vm04.local ceph-mon[53345]: from='client.16684 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:58.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:57 vm05.local ceph-mon[57841]: from='client.16680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:58.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:57 vm05.local ceph-mon[57841]: from='client.16684 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:28:59.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:58 vm05.local ceph-mon[57841]: pgmap v534: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:28:59.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:58 vm04.local ceph-mon[53345]: pgmap v534: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:29:00.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:28:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:29:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:29:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:28:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:29:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:00 vm05.local ceph-mon[57841]: pgmap v535: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:29:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:29:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:29:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:00 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 
cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:29:01.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:00 vm04.local ceph-mon[53345]: pgmap v535: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:29:01.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:29:01.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:29:01.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:01.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:00 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:29:01.850 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:29:02.038 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:29:02.038 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9m) 4m ago 15m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:29:02.038 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4m ago 15m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:29:02.038 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 4m ago 15m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:29:02.038 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 4m ago 15m - - 2026-04-15T14:29:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:01 vm04.local ceph-mon[53345]: pgmap v536: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 199 B/s rd, 398 B/s wr, 0 op/s 2026-04-15T14:29:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:01 vm04.local ceph-mon[53345]: pgmap v537: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 247 B/s rd, 495 B/s wr, 0 op/s 2026-04-15T14:29:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:01 vm05.local ceph-mon[57841]: pgmap v536: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 199 B/s rd, 398 B/s wr, 0 op/s 2026-04-15T14:29:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:01 vm05.local ceph-mon[57841]: pgmap v537: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 247 B/s rd, 495 B/s wr, 0 op/s 2026-04-15T14:29:02.267 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:29:02.267 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:29:02.267 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:29:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:02 vm05.local ceph-mon[57841]: 
from='client.16692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:02 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2006620974' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:03.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:02 vm04.local ceph-mon[53345]: from='client.16692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:03.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:02 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2006620974' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:04.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:03 vm05.local ceph-mon[57841]: from='client.16696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:04.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:03 vm05.local ceph-mon[57841]: pgmap v538: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:29:04.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:03 vm04.local ceph-mon[53345]: from='client.16696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:04.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:03 vm04.local ceph-mon[53345]: pgmap v538: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:29:05.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:04 vm05.local ceph-mon[57841]: pgmap v539: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:29:05.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:04 vm04.local ceph-mon[53345]: pgmap v539: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:29:07.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:07 vm05.local ceph-mon[57841]: pgmap v540: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:29:07.495 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:29:07.495 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:07 vm04.local ceph-mon[53345]: pgmap v540: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 247 B/s wr, 0 op/s 2026-04-15T14:29:07.677 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:29:07.678 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9m) 4m ago 15m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:29:07.678 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4m ago 15m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:29:07.678 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 4m ago 15m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:29:07.678 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 
*:8000 error 4m ago 15m - - 2026-04-15T14:29:07.917 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:29:07.917 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:29:07.917 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:29:08.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:08 vm04.local ceph-mon[53345]: from='client.16704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:08.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:08 vm04.local ceph-mon[53345]: from='client.16708 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:08.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:08 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1116719246' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:08.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:08 vm05.local ceph-mon[57841]: from='client.16704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:08.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:08 vm05.local ceph-mon[57841]: from='client.16708 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:08.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:08 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1116719246' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:09.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:09 vm05.local ceph-mon[57841]: pgmap v541: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:29:09.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:09 vm04.local ceph-mon[53345]: pgmap v541: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:29:11.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:11 vm05.local ceph-mon[57841]: pgmap v542: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:29:11.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:11 vm04.local ceph-mon[53345]: pgmap v542: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:29:13.122 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:29:13.313 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:29:13.314 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (9m) 4m ago 15m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:29:13.314 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4m ago 15m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:29:13.314 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 4m ago 15m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:29:13.314 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 4m ago 15m - - 2026-04-15T14:29:13.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 
14:29:13 vm05.local ceph-mon[57841]: pgmap v543: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:29:13.537 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:13 vm04.local ceph-mon[53345]: pgmap v543: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:29:13.537 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:29:13.537 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:29:13.537 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:29:14.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:14 vm05.local ceph-mon[57841]: from='client.16716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:14.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:14 vm05.local ceph-mon[57841]: from='client.16720 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:14.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:14 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2992355219' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:14.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:14 vm04.local ceph-mon[53345]: from='client.16716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:14.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:14 vm04.local ceph-mon[53345]: from='client.16720 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:14.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:14 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/2992355219' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:15.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:15 vm05.local ceph-mon[57841]: pgmap v544: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:15.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:29:15.519 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:15 vm04.local ceph-mon[53345]: pgmap v544: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:15.519 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:29:17.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:17 vm05.local ceph-mon[57841]: pgmap v545: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:17.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:17 vm04.local ceph-mon[53345]: pgmap v545: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:18.737 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:29:18.917 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:29:18.917 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (10m) 4m ago 15m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:29:18.917 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (4m) 4m ago 15m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:29:18.917 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 4m ago 15m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:29:18.917 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 4m ago 15m - - 2026-04-15T14:29:19.150 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:29:19.150 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:29:19.150 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:29:19.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:19 vm05.local ceph-mon[57841]: pgmap v546: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:19.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:19 vm05.local ceph-mon[57841]: from='client.16728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:19.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:19 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/189224764' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:19.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:19 vm04.local ceph-mon[53345]: pgmap v546: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:19.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:19 vm04.local ceph-mon[53345]: from='client.16728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:19.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:19 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/189224764' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:20.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:20 vm05.local ceph-mon[57841]: from='client.16732 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:20.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:20 vm04.local ceph-mon[53345]: from='client.16732 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:21.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:21 vm05.local ceph-mon[57841]: pgmap v547: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:21.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:21 vm04.local ceph-mon[53345]: pgmap v547: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:23.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:23 vm05.local ceph-mon[57841]: pgmap v548: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 170 B/s wr, 24 op/s 2026-04-15T14:29:23.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:23 vm04.local ceph-mon[53345]: pgmap v548: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 170 B/s wr, 24 op/s 2026-04-15T14:29:24.380 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:29:24.560 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:29:24.560 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (10m) 4m ago 15m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:29:24.560 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 4m ago 15m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:29:24.560 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 4m ago 15m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:29:24.560 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 4m ago 15m - - 2026-04-15T14:29:24.803 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:29:24.803 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:29:24.803 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:29:25.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:25 
vm05.local ceph-mon[57841]: pgmap v549: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T14:29:25.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:25 vm05.local ceph-mon[57841]: from='client.16740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:25.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:25 vm05.local ceph-mon[57841]: from='client.16744 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:25.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:25 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1715771875' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:25.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:25 vm04.local ceph-mon[53345]: pgmap v549: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T14:29:25.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:25 vm04.local ceph-mon[53345]: from='client.16740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:25.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:25 vm04.local ceph-mon[53345]: from='client.16744 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:25.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:25 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/1715771875' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:27.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:27 vm04.local ceph-mon[53345]: pgmap v550: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-15T14:29:27.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:27 vm05.local ceph-mon[57841]: pgmap v550: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-15T14:29:29.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:29 vm04.local ceph-mon[53345]: pgmap v551: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-15T14:29:29.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:29 vm05.local ceph-mon[57841]: pgmap v551: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-15T14:29:30.011 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:29:30.227 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:29:30.227 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (10m) 5m ago 15m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:29:30.227 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 5m ago 15m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:29:30.227 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 4m ago 15m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:29:30.227 
INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 4m ago 15m - - 2026-04-15T14:29:30.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:30 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:29:30.462 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:29:30.462 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:29:30.462 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:29:30.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:30 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:29:31.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:31 vm04.local ceph-mon[53345]: from='client.16752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:31.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:31 vm04.local ceph-mon[53345]: pgmap v552: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T14:29:31.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:31 vm04.local ceph-mon[53345]: from='client.25857 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:31.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:31 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2124916830' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:31.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:31 vm05.local ceph-mon[57841]: from='client.16752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:31.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:31 vm05.local ceph-mon[57841]: pgmap v552: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T14:29:31.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:31 vm05.local ceph-mon[57841]: from='client.25857 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:31.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:31 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/2124916830' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:33.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:33 vm04.local ceph-mon[53345]: pgmap v553: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T14:29:33.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:33 vm05.local ceph-mon[57841]: pgmap v553: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T14:29:35.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:35 vm04.local ceph-mon[53345]: pgmap v554: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 341 B/s wr, 35 op/s 2026-04-15T14:29:35.680 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:29:35.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:35 vm05.local ceph-mon[57841]: pgmap v554: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 341 B/s wr, 35 op/s 2026-04-15T14:29:35.867 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:29:35.867 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (10m) 5m ago 15m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:29:35.867 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 5m ago 15m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:29:35.867 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 4m ago 15m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:29:35.867 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 4m ago 15m - - 2026-04-15T14:29:36.086 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:29:36.086 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:29:36.086 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:29:36.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:36 vm04.local ceph-mon[53345]: from='client.25863 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:36.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:36 vm04.local ceph-mon[53345]: from='client.16766 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:36.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:36 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/1451246796' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:36.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:36 vm05.local ceph-mon[57841]: from='client.25863 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:36.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:36 vm05.local ceph-mon[57841]: from='client.16766 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:36.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:36 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/1451246796' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:37.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:37 vm04.local ceph-mon[53345]: pgmap v555: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:37.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:37 vm05.local ceph-mon[57841]: pgmap v555: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:39.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:39 vm04.local ceph-mon[53345]: pgmap v556: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:39.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:39 vm05.local ceph-mon[57841]: pgmap v556: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:41.289 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:29:41.468 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:29:41.468 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (10m) 5m ago 15m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:29:41.468 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 5m ago 15m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:29:41.468 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (4m) 4m ago 15m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:29:41.468 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 4m ago 15m - - 2026-04-15T14:29:41.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:41 vm04.local ceph-mon[53345]: pgmap v557: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:41.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:41 vm05.local ceph-mon[57841]: pgmap v557: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:41.700 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T14:29:41.700 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T14:29:41.700 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state 2026-04-15T14:29:42.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:42 vm04.local 
ceph-mon[53345]: from='client.16774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:42.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:42 vm04.local ceph-mon[53345]: from='client.16778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:42.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:42 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/2521495765' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:42.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:42 vm05.local ceph-mon[57841]: from='client.16774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:42.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:42 vm05.local ceph-mon[57841]: from='client.16778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:42.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:42 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/2521495765' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:43.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:43 vm04.local ceph-mon[53345]: pgmap v558: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:29:43.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:43 vm05.local ceph-mon[57841]: pgmap v558: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:29:45.617 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:45 vm04.local ceph-mon[53345]: pgmap v559: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:45.618 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:45 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:29:45.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:45 vm05.local ceph-mon[57841]: pgmap v559: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:45.692 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:45 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:29:46.903 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to stop 2026-04-15T14:29:46.980 INFO:teuthology.orchestra.run.vm04.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-04-15T14:29:46.980 INFO:teuthology.orchestra.run.vm04.stderr: Dload Upload Total Spent Left Speed 2026-04-15T14:29:46.981 INFO:teuthology.orchestra.run.vm04.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-04-15T14:29:47.158 INFO:teuthology.orchestra.run.vm04.stdout:anonymousScheduled to start rgw.foo.vm05.pzlhsk on host 'vm05' 2026-04-15T14:29:47.364 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for rgw.foo.vm05.pzlhsk to start 2026-04-15T14:29:47.505 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:47 vm05.local ceph-mon[57841]: pgmap v560: 129 pgs: 129 active+clean; 587 KiB 
data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:47.505 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:47 vm05.local ceph-mon[57841]: from='client.16786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:47.505 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:47 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:47.505 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:47 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:47.505 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:47 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:29:47.505 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:47 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:29:47.505 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:47 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:29:47.505 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:47 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:47.505 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:47 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:29:47.536 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:29:47.536 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (10m) 5m ago 15m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:29:47.536 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 5m ago 15m 98.2M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:29:47.536 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 4m ago 15m 100M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:29:47.536 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 error 4m ago 15m - - 2026-04-15T14:29:47.537 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:47 vm04.local ceph-mon[53345]: pgmap v560: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:29:47.537 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:47 vm04.local ceph-mon[53345]: from='client.16786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:47.537 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:47 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:47.537 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:47 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:47.537 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:47 vm04.local ceph-mon[53345]: 
from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:29:47.537 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:47 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:29:47.537 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:47 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:29:47.537 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:47 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:47.537 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:47 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:29:47.803 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T14:29:47.803 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T14:29:47.803 INFO:teuthology.orchestra.run.vm04.stdout: daemon rgw.foo.vm05.pzlhsk on vm05 is in error state
2026-04-15T14:29:48.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='client.16794 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm05.pzlhsk", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:29:48.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: Schedule start daemon rgw.foo.vm05.pzlhsk
2026-04-15T14:29:48.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: pgmap v561: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 184 B/s wr, 0 op/s
2026-04-15T14:29:48.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: pgmap v562: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 112 B/s rd, 225 B/s wr, 0 op/s
2026-04-15T14:29:48.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='client.16798 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:29:48.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='client.16802 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:29:48.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:48.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:48.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:29:48.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/163109969' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:29:48.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:48.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:48.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:29:48.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:29:48.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:48.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:48 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:29:49.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='client.16794 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm05.pzlhsk", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: Schedule start daemon rgw.foo.vm05.pzlhsk
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: pgmap v561: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 184 B/s wr, 0 op/s
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: pgmap v562: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 112 B/s rd, 225 B/s wr, 0 op/s
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='client.16798 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='client.16802 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/163109969' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:29:49.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:48 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:29:49.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:49 vm05.local ceph-mon[57841]: pgmap v563: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 238 B/s wr, 47 op/s
2026-04-15T14:29:49.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:49 vm05.local ceph-mon[57841]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T14:29:49.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:49 vm05.local ceph-mon[57841]: Cluster is now healthy
2026-04-15T14:29:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:49 vm04.local ceph-mon[53345]: pgmap v563: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 238 B/s wr, 47 op/s
2026-04-15T14:29:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:49 vm04.local ceph-mon[53345]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T14:29:50.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:49 vm04.local ceph-mon[53345]: Cluster is now healthy
2026-04-15T14:29:51.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:51 vm05.local ceph-mon[57841]: pgmap v564: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 238 B/s wr, 117 op/s
2026-04-15T14:29:52.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:51 vm04.local ceph-mon[53345]: pgmap v564: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 238 B/s wr, 117 op/s
2026-04-15T14:29:53.001 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (5s) 4s ago 16m 98.6M - 20.2.0-19-g7ec4401a095 259950fb12cb 06587447d970
2026-04-15T14:29:53.002 INFO:teuthology.orchestra.run.vm04.stdout:Check with each haproxy down in turn...
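The stretch above is the recovery half of the rgw check: the orchestrator is told to restart the one failed daemon, and the CEPHADM_FAILED_DAEMON warning clears once it comes back. A minimal sketch of the restart-and-poll pattern behind these dispatches, assuming a cephadm shell with the client.admin keyring (daemon name taken from the records above):

    ceph orch daemon start rgw.foo.vm05.pzlhsk
    # poll until the daemon reports running again, as the repeated
    # 'orch ps' (daemon_type rgw) and 'health detail' dispatches above suggest
    timeout 300 bash -c '
      until ceph orch ps --daemon-type rgw | grep rgw.foo.vm05.pzlhsk | grep -q running; do
        ceph health detail
        sleep 5
      done
    '

The surrounding mon noise is the audit channel mirrored through journalctl on both hosts; the same records can be read back from the cluster itself, e.g. with ceph log last 50 info audit.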
2026-04-15T14:29:53.433 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled to stop haproxy.rgw.foo.vm04.lpycfq on host 'vm04' 2026-04-15T14:29:53.656 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for haproxy.rgw.foo.vm04.lpycfq to stop 2026-04-15T14:29:53.844 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:29:53.844 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm04.lpycfq vm04 *:9000,9001 running (15m) 5m ago 15m 7650k - 2.3.17-d1c9119 e85424b0d443 92a7fdda6771 2026-04-15T14:29:53.844 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm05.ffntij vm05 *:9000,9001 running (15m) 5s ago 15m 4442k - 2.3.17-d1c9119 e85424b0d443 b12fa96855df 2026-04-15T14:29:54.081 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:53 vm04.local ceph-mon[53345]: pgmap v565: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 0 B/s wr, 152 op/s 2026-04-15T14:29:54.081 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:54.081 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:54.081 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:29:54.081 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:29:54.081 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:29:54.081 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:54.081 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:53 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:29:54.081 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-04-15T14:29:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:53 vm05.local ceph-mon[57841]: pgmap v565: 129 pgs: 129 active+clean; 587 KiB data, 250 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 0 B/s wr, 152 op/s 2026-04-15T14:29:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:29:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:53 vm05.local ceph-mon[57841]: 
from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:29:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:29:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:54.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:53 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:29:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:54 vm04.local ceph-mon[53345]: from='client.16818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:54 vm04.local ceph-mon[53345]: from='client.16822 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:54 vm04.local ceph-mon[53345]: from='client.16826 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm04.lpycfq", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:54 vm04.local ceph-mon[53345]: Schedule stop daemon haproxy.rgw.foo.vm04.lpycfq 2026-04-15T14:29:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:54 vm04.local ceph-mon[53345]: from='client.16830 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:54 vm04.local ceph-mon[53345]: from='client.16834 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:54 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:29:55.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:54 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/2047213579' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:54 vm05.local ceph-mon[57841]: from='client.16818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:54 vm05.local ceph-mon[57841]: from='client.16822 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:54 vm05.local ceph-mon[57841]: from='client.16826 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm04.lpycfq", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:54 vm05.local ceph-mon[57841]: Schedule stop daemon haproxy.rgw.foo.vm04.lpycfq 2026-04-15T14:29:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:54 vm05.local ceph-mon[57841]: from='client.16830 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:54 vm05.local ceph-mon[57841]: from='client.16834 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:29:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:54 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:29:55.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:54 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/2047213579' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:29:56.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:55 vm04.local ceph-mon[53345]: pgmap v566: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 238 B/s wr, 152 op/s 2026-04-15T14:29:56.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:55 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:56.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:55 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:56.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:55 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:29:56.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:55 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:29:56.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:55 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:56.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:55 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:29:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:55 vm05.local ceph-mon[57841]: pgmap v566: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 238 B/s wr, 152 op/s 2026-04-15T14:29:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:55 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:55 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:55 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:29:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:55 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:29:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:55 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:29:56.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:55 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:29:58.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:57 vm04.local ceph-mon[53345]: pgmap v567: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 84 KiB/s rd, 215 B/s wr, 137 op/s 2026-04-15T14:29:58.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:57 vm05.local ceph-mon[57841]: pgmap v567: 129 pgs: 129 active+clean; 587 KiB 
data, 268 MiB used, 160 GiB / 160 GiB avail; 84 KiB/s rd, 215 B/s wr, 137 op/s
2026-04-15T14:29:59.302 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm04.lpycfq vm04 *:9000,9001 stopped 3s ago 15m - -
2026-04-15T14:29:59.315 INFO:teuthology.orchestra.run.vm04.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T14:29:59.315 INFO:teuthology.orchestra.run.vm04.stderr: Dload Upload Total Spent Left Speed
2026-04-15T14:29:59.315 INFO:teuthology.orchestra.run.vm04.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
2026-04-15T14:29:59.315 INFO:teuthology.orchestra.run.vm04.stderr:curl: (7) Failed to connect to 12.12.1.104 port 9000: Connection refused
2026-04-15T14:29:59.315 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for http://12.12.1.104:9000/ to be available
2026-04-15T14:30:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:59 vm04.local ceph-mon[53345]: pgmap v568: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 356 B/s wr, 114 op/s
2026-04-15T14:30:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:00.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:29:59 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:30:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:59 vm05.local ceph-mon[57841]: pgmap v568: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 356 B/s wr, 114 op/s
2026-04-15T14:30:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:00.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:29:59 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T14:30:00.329 INFO:teuthology.orchestra.run.vm04.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T14:30:00.329 INFO:teuthology.orchestra.run.vm04.stderr: Dload Upload Total Spent Left Speed
2026-04-15T14:30:00.329 INFO:teuthology.orchestra.run.vm04.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
2026-04-15T14:30:00.329 INFO:teuthology.orchestra.run.vm04.stderr:curl: (7) Failed to connect to 12.12.1.104 port 9000: Connection refused
2026-04-15T14:30:00.329 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for http://12.12.1.104:9000/ to be available
2026-04-15T14:30:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:00 vm05.local ceph-mon[57841]: from='client.16842 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:30:01.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:00 vm05.local ceph-mon[57841]: overall HEALTH_OK
2026-04-15T14:30:01.332 INFO:teuthology.orchestra.run.vm04.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T14:30:01.332 INFO:teuthology.orchestra.run.vm04.stderr: Dload Upload Total Spent Left Speed
2026-04-15T14:30:01.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:00 vm04.local ceph-mon[53345]: from='client.16842 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:30:01.332 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:00 vm04.local ceph-mon[53345]: overall HEALTH_OK
2026-04-15T14:30:01.333 INFO:teuthology.orchestra.run.vm04.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T14:30:01.528 INFO:teuthology.orchestra.run.vm04.stdout:anonymousScheduled to start haproxy.rgw.foo.vm04.lpycfq on host 'vm04'
2026-04-15T14:30:01.756 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for haproxy.rgw.foo.vm04.lpycfq to start
2026-04-15T14:30:01.959 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T14:30:01.959 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm04.lpycfq vm04 *:9000,9001 stopped 6s ago 15m - -
2026-04-15T14:30:01.959 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm05.ffntij vm05 *:9000,9001 running (16m) 13s ago 16m 4442k - 2.3.17-d1c9119 e85424b0d443 b12fa96855df
2026-04-15T14:30:02.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:01 vm04.local ceph-mon[53345]: pgmap v569: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 341 B/s wr, 75 op/s
2026-04-15T14:30:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:30:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:30:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:30:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:02.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:01 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:30:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:01 vm05.local ceph-mon[57841]: pgmap v569: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 341 B/s wr, 75 op/s
2026-04-15T14:30:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:02.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:30:02.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:30:02.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:30:02.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:02.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:01 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:30:02.195 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-04-15T14:30:03.187 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:02 vm04.local ceph-mon[53345]: from='client.16846 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm04.lpycfq", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:30:03.187 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:02 vm04.local ceph-mon[53345]: Schedule start daemon haproxy.rgw.foo.vm04.lpycfq
2026-04-15T14:30:03.187 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:02 vm04.local ceph-mon[53345]: from='client.16850 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:30:03.187 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:02 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/3462665319' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:30:03.187 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:02 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:03.187 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:02 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:03.187 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:02 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:30:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:02 vm05.local ceph-mon[57841]: from='client.16846 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm04.lpycfq", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:30:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:02 vm05.local ceph-mon[57841]: Schedule start daemon haproxy.rgw.foo.vm04.lpycfq
2026-04-15T14:30:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:02 vm05.local ceph-mon[57841]: from='client.16850 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:30:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:02 vm05.local ceph-mon[57841]: from='client.? 192.168.123.104:0/3462665319' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T14:30:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:02 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:02 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:03.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:02 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T14:30:04.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:03 vm04.local ceph-mon[53345]: from='client.16854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:30:04.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:03 vm04.local ceph-mon[53345]: pgmap v570: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 341 B/s wr, 25 op/s
2026-04-15T14:30:04.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:03 vm05.local ceph-mon[57841]: from='client.16854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T14:30:04.193 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:03 vm05.local ceph-mon[57841]: pgmap v570: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 341 B/s wr, 25 op/s
2026-04-15T14:30:05.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:05 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:05.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:05 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:05.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:05 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T14:30:05.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:05 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T14:30:05.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:05 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:05.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:05 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T14:30:05.442 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:05 vm05.local ceph-mon[57841]: pgmap v571: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T14:30:05.601 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:05 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
2026-04-15T14:30:05.601 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:05 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad'
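What the curl transcript in this stretch shows: with haproxy.rgw.foo.vm04.lpycfq stopped, probes against the ingress VIP fail with 'Connection refused' until the request at 14:30:01.333 returns a 187-byte body, presumably once keepalived has moved the VIP over to the still-running haproxy on vm05. That body is most likely the RGW anonymous-user bucket listing; its owner ID is printed by curl without a trailing newline, which is why 'anonymous' runs straight into the following 'Scheduled to start ...' stdout line. A hedged sketch of the probe loop that produces the 'Waiting for ...' lines, using the VIP from this run:

    timeout 300 bash -c '
      until curl http://12.12.1.104:9000/ ; do
        echo "Waiting for http://12.12.1.104:9000/ to be available"
        sleep 1
      done
    '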
2026-04-15T14:30:05.601 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:05 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:30:05.601 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:05 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:30:05.601 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:05 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:05.601 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:05 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:30:05.601 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:05 vm04.local ceph-mon[53345]: pgmap v571: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:30:07.419 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm04.lpycfq vm04 *:9000,9001 running (5s) 3s ago 16m 4022k - 2.3.17-d1c9119 e85424b0d443 155089680ecb 2026-04-15T14:30:07.623 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled to stop haproxy.rgw.foo.vm05.ffntij on host 'vm05' 2026-04-15T14:30:07.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:07 vm04.local ceph-mon[53345]: pgmap v572: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:30:07.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:07 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:07.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:07 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:07.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:07 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:30:07.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:07 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:30:07.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:07 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:30:07.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:07 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:07.719 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:07 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:30:07.852 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for haproxy.rgw.foo.vm05.ffntij to stop 2026-04-15T14:30:08.016 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:07 vm05.local ceph-mon[57841]: pgmap v572: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 85 B/s 
rd, 170 B/s wr, 0 op/s 2026-04-15T14:30:08.016 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:07 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:08.016 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:07 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:08.016 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:07 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:30:08.016 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:07 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:30:08.016 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:07 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:30:08.016 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:07 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:08.016 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:07 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:30:08.049 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:30:08.049 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm04.lpycfq vm04 *:9000,9001 running (5s) 3s ago 16m 4022k - 2.3.17-d1c9119 e85424b0d443 155089680ecb 2026-04-15T14:30:08.049 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm05.ffntij vm05 *:9000,9001 running (16m) 19s ago 16m 4442k - 2.3.17-d1c9119 e85424b0d443 b12fa96855df 2026-04-15T14:30:08.286 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-04-15T14:30:08.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:08 vm04.local ceph-mon[53345]: from='client.16862 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:08.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:08 vm04.local ceph-mon[53345]: from='client.16866 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm05.ffntij", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:08.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:08 vm04.local ceph-mon[53345]: Schedule stop daemon haproxy.rgw.foo.vm05.ffntij 2026-04-15T14:30:08.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:08 vm04.local ceph-mon[53345]: from='client.16870 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:08.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:08 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:08.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:08 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:08.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:08 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' 
entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:30:08.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:08 vm04.local ceph-mon[53345]: from='client.? 192.168.123.104:0/554243443' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:30:08.983 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:08 vm05.local ceph-mon[57841]: from='client.16862 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:08.983 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:08 vm05.local ceph-mon[57841]: from='client.16866 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm05.ffntij", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:08.983 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:08 vm05.local ceph-mon[57841]: Schedule stop daemon haproxy.rgw.foo.vm05.ffntij 2026-04-15T14:30:08.983 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:08 vm05.local ceph-mon[57841]: from='client.16870 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:08.983 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:08 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:08.983 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:08 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:08.983 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:08 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:30:08.983 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:08 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/554243443' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:30:10.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:09 vm04.local ceph-mon[53345]: from='client.16874 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:10.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:09 vm04.local ceph-mon[53345]: pgmap v573: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:30:10.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:09 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:10.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:09 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:10.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:09 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:30:10.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:09 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:30:10.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:09 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:10.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:09 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:30:10.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:09 vm05.local ceph-mon[57841]: from='client.16874 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:10.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:09 vm05.local ceph-mon[57841]: pgmap v573: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:30:10.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:09 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:10.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:09 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:10.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:09 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:30:10.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:09 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:30:10.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:09 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:10.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:09 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' 
entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:30:12.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:11 vm04.local ceph-mon[53345]: pgmap v574: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:30:12.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:11 vm05.local ceph-mon[57841]: pgmap v574: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:30:13.504 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm05.ffntij vm05 *:9000,9001 stopped 4s ago 16m - - 2026-04-15T14:30:13.509 INFO:teuthology.orchestra.run.vm04.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-04-15T14:30:13.509 INFO:teuthology.orchestra.run.vm04.stderr: Dload Upload Total Spent Left Speed 2026-04-15T14:30:13.509 INFO:teuthology.orchestra.run.vm04.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-04-15T14:30:13.757 INFO:teuthology.orchestra.run.vm04.stdout:anonymousScheduled to start haproxy.rgw.foo.vm05.ffntij on host 'vm05' 2026-04-15T14:30:13.975 INFO:teuthology.orchestra.run.vm04.stdout:Waiting for haproxy.rgw.foo.vm05.ffntij to start 2026-04-15T14:30:14.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:13 vm04.local ceph-mon[53345]: pgmap v575: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:30:14.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:13 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:14.165 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:30:14.165 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm04.lpycfq vm04 *:9000,9001 running (11s) 10s ago 16m 4022k - 2.3.17-d1c9119 e85424b0d443 155089680ecb 2026-04-15T14:30:14.165 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm05.ffntij vm05 *:9000,9001 stopped 5s ago 16m - - 2026-04-15T14:30:14.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:13 vm05.local ceph-mon[57841]: pgmap v575: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail 2026-04-15T14:30:14.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:13 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:14.428 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='client.16882 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='client.16886 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm05.ffntij", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: Schedule start daemon haproxy.rgw.foo.vm05.ffntij 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local 
ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:30:14.924 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:14 vm05.local ceph-mon[57841]: from='client.? 
192.168.123.104:0/2836348945' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:30:15.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='client.16882 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='client.16886 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm05.ffntij", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: Schedule start daemon haproxy.rgw.foo.vm05.ffntij 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T14:30:15.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:14 vm04.local ceph-mon[53345]: from='client.? 
192.168.123.104:0/2836348945' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T14:30:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:15 vm04.local ceph-mon[53345]: from='client.16890 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:15 vm04.local ceph-mon[53345]: from='client.16894 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:15 vm04.local ceph-mon[53345]: pgmap v576: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:30:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:30:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:30:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:16.118 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:15 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:30:16.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:15 vm05.local ceph-mon[57841]: from='client.16890 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:16.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:15 vm05.local ceph-mon[57841]: from='client.16894 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:16.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:15 vm05.local ceph-mon[57841]: pgmap v576: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:30:16.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:16.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:16.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T14:30:16.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' 
entity='mgr.vm04.ycniad' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T14:30:16.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' 2026-04-15T14:30:16.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:15 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T14:30:18.117 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:17 vm04.local ceph-mon[53345]: pgmap v577: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:30:18.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:17 vm05.local ceph-mon[57841]: pgmap v577: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T14:30:19.645 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm05.ffntij vm05 *:9000,9001 running (5s) 4s ago 16m 4026k - 2.3.17-d1c9119 e85424b0d443 99f933f2f666 2026-04-15T14:30:19.649 INFO:teuthology.orchestra.run.vm04.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-04-15T14:30:19.650 INFO:teuthology.orchestra.run.vm04.stderr: Dload Upload Total Spent Left Speed 2026-04-15T14:30:19.650 INFO:teuthology.orchestra.run.vm04.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-04-15T14:30:19.720 INFO:teuthology.orchestra.run.vm04.stdout:anonymous 2026-04-15T14:30:19.720 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-04-15T14:30:19.722 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm04.local 2026-04-15T14:30:19.723 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'stat -c '"'"'%u %g'"'"' /var/log/ceph | grep '"'"'167 167'"'"'' 2026-04-15T14:30:19.867 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:30:19.918 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:19 vm04.local ceph-mon[53345]: pgmap v578: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:30:19.963 INFO:teuthology.orchestra.run.vm04.stdout:167 167 2026-04-15T14:30:20.012 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch status' 2026-04-15T14:30:20.148 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:19 vm05.local ceph-mon[57841]: pgmap v578: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:30:20.150 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:30:20.549 INFO:teuthology.orchestra.run.vm04.stdout:Backend: cephadm 2026-04-15T14:30:20.549 INFO:teuthology.orchestra.run.vm04.stdout:Available: Yes 
2026-04-15T14:30:20.549 INFO:teuthology.orchestra.run.vm04.stdout:Paused: No 2026-04-15T14:30:20.626 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch ps' 2026-04-15T14:30:20.763 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:30:20.813 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:20 vm04.local ceph-mon[53345]: from='client.16902 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:20.942 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:20 vm05.local ceph-mon[57841]: from='client.16902 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.vm04 vm04 *:9093,9094 running (17m) 17s ago 18m 21.4M - 0.28.1 91c01b3cec9b 046e19e1d7ef 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:ceph-exporter.vm04 vm04 *:9926 running (18m) 17s ago 18m 10.4M - 20.2.0-19-g7ec4401a095 259950fb12cb ac42288b7c30 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:ceph-exporter.vm05 vm05 *:9926 running (17m) 5s ago 17m 10.8M - 20.2.0-19-g7ec4401a095 259950fb12cb 973a0e5e1218 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:crash.vm04 vm04 running (18m) 17s ago 18m 11.1M - 20.2.0-19-g7ec4401a095 259950fb12cb f1ba08f4a680 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:crash.vm05 vm05 running (17m) 5s ago 17m 11.1M - 20.2.0-19-g7ec4401a095 259950fb12cb cd842580320f 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:grafana.vm04 vm04 *:3000 running (17m) 17s ago 17m 132M - 12.2.0 1849e2140421 f4986a9d59d0 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm04.lpycfq vm04 *:9000,9001 running (18s) 17s ago 16m 4022k - 2.3.17-d1c9119 e85424b0d443 155089680ecb 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:haproxy.rgw.foo.vm05.ffntij vm05 *:9000,9001 running (6s) 5s ago 16m 4026k - 2.3.17-d1c9119 e85424b0d443 99f933f2f666 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:keepalived.rgw.foo.vm04.uutjkf vm04 running (16m) 17s ago 16m 2373k - 2.2.4 4a3a1ff181d9 c5a618140fea 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:keepalived.rgw.foo.vm05.txbkes vm05 running (16m) 5s ago 16m 2377k - 2.2.4 4a3a1ff181d9 faedf82abd3a 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:mgr.vm04.ycniad vm04 *:9283,8765,8443 running (19m) 17s ago 19m 577M - 20.2.0-19-g7ec4401a095 259950fb12cb b8faa7587f88 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:mgr.vm05.ozgwuj vm05 *:8443,9283,8765 running (17m) 5s ago 17m 484M - 20.2.0-19-g7ec4401a095 259950fb12cb b7dc555fed45 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:mon.vm04 vm04 running (19m) 17s ago 19m 70.5M 2048M 20.2.0-19-g7ec4401a095 259950fb12cb 6257b904a435 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:mon.vm05 vm05 running (17m) 5s ago 17m 49.5M 2048M 20.2.0-19-g7ec4401a095 259950fb12cb 
c33af830112a 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.vm04 vm04 *:9100 running (18m) 17s ago 18m 16.1M - 1.9.1 255ec253085f 0afcbfea792f 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.vm05 vm05 *:9100 running (17m) 5s ago 17m 13.6M - 1.9.1 255ec253085f bef5815ebaec 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm05 running (17m) 5s ago 17m 87.5M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb d2151bc5647c 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (17m) 17s ago 17m 83.5M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb fda113eaca8f 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm05 running (16m) 5s ago 16m 84.1M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb 20816d9bae45 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (16m) 17s ago 16m 61.5M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb 7714f29306f4 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm05 running (16m) 5s ago 16m 81.7M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb b01e22f64201 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm04 running (16m) 17s ago 16m 75.5M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb c10f625df3e7 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm04 running (16m) 17s ago 16m 75.3M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb 7458cb7b5ae0 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm05 running (16m) 5s ago 16m 83.4M 4096M 20.2.0-19-g7ec4401a095 259950fb12cb aa065f2eb46a 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.vm04 vm04 *:9095 running (16m) 17s ago 17m 55.0M - 3.6.0 4fcecf061b74 946fe198c799 2026-04-15T14:30:21.154 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.lqvrsn vm04 *:8001 running (11m) 17s ago 16m 136M - 20.2.0-19-g7ec4401a095 259950fb12cb f4e5cc12214d 2026-04-15T14:30:21.155 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.owsxoy vm04 *:8000 running (5m) 17s ago 16m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb b4f24b4e998e 2026-04-15T14:30:21.155 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.dhvjjs vm05 *:8001 running (5m) 5s ago 16m 124M - 20.2.0-19-g7ec4401a095 259950fb12cb c319fda4e31a 2026-04-15T14:30:21.155 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm05.pzlhsk vm05 *:8000 running (33s) 5s ago 16m 101M - 20.2.0-19-g7ec4401a095 259950fb12cb 06587447d970 2026-04-15T14:30:21.227 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch ls' 2026-04-15T14:30:21.359 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager ?:9093,9094 1/1 17s ago 18m count:1 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:ceph-exporter ?:9926 2/2 17s ago 18m * 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:crash 2/2 17s ago 18m * 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:grafana ?:3000 1/1 17s ago 18m count:1 2026-04-15T14:30:21.759 
INFO:teuthology.orchestra.run.vm04.stdout:ingress.rgw.foo 12.12.1.104:9000,9001 4/4 17s ago 16m count:2 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:mgr 2/2 17s ago 18m count:2 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:mon 2/2 17s ago 18m vm04:192.168.123.104=vm04;vm05:192.168.123.105=vm05;count:2 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter ?:9100 2/2 17s ago 18m * 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:osd.all-available-devices 8 17s ago 17m * 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:prometheus ?:9095 1/1 17s ago 18m count:1 2026-04-15T14:30:21.759 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo ?:8000,8001 4/4 17s ago 16m count:4;* 2026-04-15T14:30:21.814 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch host ls' 2026-04-15T14:30:21.956 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:30:22.016 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:21 vm04.local ceph-mon[53345]: from='client.16906 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:22.016 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:21 vm04.local ceph-mon[53345]: pgmap v579: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:30:22.016 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:21 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:22.016 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:21 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:22.016 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:21 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:22.016 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:21 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:22.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:21 vm05.local ceph-mon[57841]: from='client.16906 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:22.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:21 vm05.local ceph-mon[57841]: pgmap v579: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:30:22.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:21 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": 
"rgw_frontends"} : dispatch 2026-04-15T14:30:22.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:21 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:22.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:21 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:22.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:21 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:22.333 INFO:teuthology.orchestra.run.vm04.stdout:HOST ADDR LABELS STATUS 2026-04-15T14:30:22.333 INFO:teuthology.orchestra.run.vm04.stdout:vm04 192.168.123.104 2026-04-15T14:30:22.333 INFO:teuthology.orchestra.run.vm04.stdout:vm05 192.168.123.105 2026-04-15T14:30:22.333 INFO:teuthology.orchestra.run.vm04.stdout:2 hosts in cluster 2026-04-15T14:30:22.406 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch device ls' 2026-04-15T14:30:22.545 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:30:22.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:22 vm04.local ceph-mon[53345]: from='client.16910 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:22.868 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:22 vm04.local ceph-mon[53345]: from='client.16914 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme0n1 hdd Linux_6b7ad07c165929411c9d 19.9G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme1n1 hdd Linux_fff789e869d944ed0405 19.9G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme2n1 hdd Linux_3369c177c0052290acf5 19.9G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/nvme3n1 hdd Linux_6212e725753f54ffe728 19.9G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 16m ago Has a FileSystem, Insufficient space (<5GB) 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vdb hdd DWNBRSTVMM04001 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vdc hdd DWNBRSTVMM04002 20.0G No 
16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vdd hdd DWNBRSTVMM04003 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm04 /dev/vde hdd DWNBRSTVMM04004 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme0n1 hdd Linux_aa69ffdfa7408bf7867c 19.9G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme1n1 hdd Linux_6884663c6eaa85c58c9d 19.9G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme2n1 hdd Linux_01fde6453ce49045a1d5 19.9G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/nvme3n1 hdd Linux_0105428e5b971c67554c 19.9G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 16m ago Has a FileSystem, Insufficient space (<5GB) 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vdb hdd DWNBRSTVMM05001 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vdc hdd DWNBRSTVMM05002 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vdd hdd DWNBRSTVMM05003 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.919 INFO:teuthology.orchestra.run.vm04.stdout:vm05 /dev/vde hdd DWNBRSTVMM05004 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T14:30:22.969 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- bash -c 'ceph orch ls | grep '"'"'^osd.all-available-devices '"'"'' 2026-04-15T14:30:23.101 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:30:23.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:22 vm05.local ceph-mon[57841]: from='client.16910 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:23.192 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:22 vm05.local ceph-mon[57841]: from='client.16914 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:23.495 INFO:teuthology.orchestra.run.vm04.stdout:osd.all-available-devices 8 19s ago 17m * 2026-04-15T14:30:23.567 DEBUG:teuthology.run_tasks:Unwinding manager vip 2026-04-15T14:30:23.570 INFO:tasks.vip:Removing 12.12.0.104 (and any VIPs) on vm04.local iface eth0... 
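Every device in the 'ceph orch device ls' listing above is rejected (AVAILABLE = No) with 'Has a FileSystem ... LVM detected', which is expected at this point in the run: the OSDs have already been deployed, so the disks carry LVM metadata and nothing is left for the orchestrator to consume. A hedged sketch for pulling out only the still-available devices, assuming jq is installed and the JSON layout used by recent releases (host entries carrying a .name and a .devices array with .path and .available fields):

    # List host/path pairs for devices cephadm still reports as available.
    ceph orch device ls --format json \
      | jq -r '.[] | .name as $host
               | .devices[] | select(.available)
               | "\($host) \(.path)"'

An empty result here would be consistent with the table above, where every row shows AVAILABLE = No.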
2026-04-15T14:30:23.570 DEBUG:teuthology.orchestra.run.vm04:> sudo ip addr del 12.12.0.104/22 dev eth0 2026-04-15T14:30:23.599 DEBUG:teuthology.orchestra.run.vm04:> sudo ip addr del 12.12.1.104/22 dev eth0 2026-04-15T14:30:23.666 INFO:tasks.vip:Removing 12.12.0.105 (and any VIPs) on vm05.local iface eth0... 2026-04-15T14:30:23.666 DEBUG:teuthology.orchestra.run.vm05:> sudo ip addr del 12.12.0.105/22 dev eth0 2026-04-15T14:30:23.692 DEBUG:teuthology.orchestra.run.vm05:> sudo ip addr del 12.12.1.104/22 dev eth0 2026-04-15T14:30:23.754 INFO:teuthology.orchestra.run.vm05.stderr:Error: ipv4: Address not found. 2026-04-15T14:30:23.755 DEBUG:teuthology.orchestra.run:got remote process result: 2 2026-04-15T14:30:23.755 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-04-15T14:30:23.758 INFO:tasks.cephadm:Teardown begin 2026-04-15T14:30:23.758 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-04-15T14:30:23.785 DEBUG:teuthology.orchestra.run.vm05:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-04-15T14:30:23.820 INFO:tasks.cephadm:Disabling cephadm mgr module 2026-04-15T14:30:23.820 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae -- ceph mgr module disable cephadm 2026-04-15T14:30:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:23 vm05.local ceph-mon[57841]: from='client.16918 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:23 vm05.local ceph-mon[57841]: pgmap v580: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:30:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:23.943 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:23 vm05.local ceph-mon[57841]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:23.965 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/mon.vm04/config 2026-04-15T14:30:23.988 INFO:teuthology.orchestra.run.vm04.stderr:Error: statfs /etc/ceph/ceph.client.admin.keyring: no such file or directory 2026-04-15T14:30:24.012 DEBUG:teuthology.orchestra.run:got remote process result: 125 2026-04-15T14:30:24.012 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 
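The 'Error: ipv4: Address not found.' from vm05 above is benign: the ingress VIP 12.12.1.104 was held by vm04 at teardown time, so the unconditional 'ip addr del' on vm05 has nothing to remove, and the vip unwind simply records the non-zero exit and moves on. A minimal sketch of an idempotent variant that checks for the address first:

    # Delete the VIP only if this interface actually carries it.
    vip=12.12.1.104/22
    if ip -o addr show dev eth0 | grep -q "inet ${vip%%/*}/"; then
        sudo ip addr del "$vip" dev eth0
    fi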
2026-04-15T14:30:24.013 DEBUG:teuthology.orchestra.run.vm04:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-04-15T14:30:24.028 DEBUG:teuthology.orchestra.run.vm05:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-04-15T14:30:24.044 INFO:tasks.cephadm:Stopping all daemons... 2026-04-15T14:30:24.044 INFO:tasks.cephadm.mon.vm04:Stopping mon.vm04... 2026-04-15T14:30:24.044 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm04 2026-04-15T14:30:24.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:23 vm04.local ceph-mon[53345]: from='client.16918 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T14:30:24.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:23 vm04.local ceph-mon[53345]: pgmap v580: 129 pgs: 129 active+clean; 587 KiB data, 268 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T14:30:24.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.owsxoy", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:24.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.lqvrsn", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:24.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.pzlhsk", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:24.092 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:23 vm04.local ceph-mon[53345]: from='mgr.14231 192.168.123.104:0/3702050783' entity='mgr.vm04.ycniad' cmd={"prefix": "config get", "who": "client.rgw.foo.vm05.dhvjjs", "name": "rgw_frontends"} : dispatch 2026-04-15T14:30:24.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:24 vm04.local systemd[1]: Stopping Ceph mon.vm04 for d89dc7c6-38d4-11f1-aa58-cd98464f39ae... 
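As the stop command above shows, cephadm wraps each daemon in a templated systemd unit named ceph-<fsid>@<daemon>, so any daemon of this cluster can be stopped the same way; a sketch using the fsid from this run:

    # Stop one cephadm-managed daemon via its templated systemd unit.
    fsid=d89dc7c6-38d4-11f1-aa58-cd98464f39ae
    daemon=mon.vm04
    sudo systemctl stop "ceph-${fsid}@${daemon}"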
2026-04-15T14:30:24.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:24 vm04.local ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04[53341]: 2026-04-15T14:30:24.251+0000 7f09b95c1640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm04 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-04-15T14:30:24.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:24 vm04.local ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04[53341]: 2026-04-15T14:30:24.251+0000 7f09b95c1640 -1 mon.vm04@0(leader) e2 *** Got Signal Terminated *** 2026-04-15T14:30:24.368 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 15 14:30:24 vm04.local podman[123760]: 2026-04-15 14:30:24.292460014 +0000 UTC m=+0.126203415 container died 6257b904a43573c45c7e4cade5de85efd85107bb4e3c97aa3792b1da2d0f3e6f (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm04, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-19-g7ec4401a095) 2026-04-15T14:30:24.498 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm04.service' 2026-04-15T14:30:24.576 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-04-15T14:30:24.576 INFO:tasks.cephadm.mon.vm04:Stopped mon.vm04 2026-04-15T14:30:24.576 INFO:tasks.cephadm.mon.vm05:Stopping mon.vm05... 2026-04-15T14:30:24.576 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm05 2026-04-15T14:30:24.858 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:24 vm05.local systemd[1]: Stopping Ceph mon.vm05 for d89dc7c6-38d4-11f1-aa58-cd98464f39ae... 
2026-04-15T14:30:24.858 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:24 vm05.local ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm05[57837]: 2026-04-15T14:30:24.681+0000 7fb22516e640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm05 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-04-15T14:30:24.858 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:24 vm05.local ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm05[57837]: 2026-04-15T14:30:24.681+0000 7fb22516e640 -1 mon.vm05@1(peon) e2 *** Got Signal Terminated *** 2026-04-15T14:30:24.858 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:24 vm05.local podman[91613]: 2026-04-15 14:30:24.824753667 +0000 UTC m=+0.157278212 container died c33af830112a7b1a52d5917062a40a0cec23bf74426f2cf7e036e494d14bf03b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm05, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-19-g7ec4401a095, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86, FROM_IMAGE=rockylinux:9) 2026-04-15T14:30:24.858 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:24 vm05.local podman[91613]: 2026-04-15 14:30:24.840620669 +0000 UTC m=+0.173145214 container remove c33af830112a7b1a52d5917062a40a0cec23bf74426f2cf7e036e494d14bf03b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9, name=ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm05, CEPH_REF=20.2.0-19-g7ec4401a095, CEPH_SHA1=7ec4401a095f03c389fcf6df60e966f86395fb86, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git) 2026-04-15T14:30:24.858 INFO:journalctl@ceph.mon.vm05.vm05.stdout:Apr 15 14:30:24 vm05.local bash[91613]: ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae-mon-vm05 2026-04-15T14:30:24.923 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-d89dc7c6-38d4-11f1-aa58-cd98464f39ae@mon.vm05.service' 2026-04-15T14:30:24.958 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-04-15T14:30:24.959 INFO:tasks.cephadm.mon.vm05:Stopped mon.vm05 2026-04-15T14:30:24.959 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae --force --keep-logs 2026-04-15T14:30:25.101 INFO:teuthology.orchestra.run.vm04.stdout:Deleting cluster with fsid: d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:31:15.160 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae --force --keep-logs 2026-04-15T14:31:15.307 INFO:teuthology.orchestra.run.vm05.stdout:Deleting cluster with fsid: d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:32:03.944 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-04-15T14:32:03.979 
DEBUG:teuthology.orchestra.run.vm05:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-04-15T14:32:04.007 INFO:tasks.cephadm:Archiving crash dumps... 2026-04-15T14:32:04.007 DEBUG:teuthology.misc:Transferring archived files from vm04:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/crash to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5369/remote/vm04/crash 2026-04-15T14:32:04.007 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/crash -- . 2026-04-15T14:32:04.048 INFO:teuthology.orchestra.run.vm04.stderr:tar: /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/crash: Cannot open: No such file or directory 2026-04-15T14:32:04.048 INFO:teuthology.orchestra.run.vm04.stderr:tar: Error is not recoverable: exiting now 2026-04-15T14:32:04.049 DEBUG:teuthology.misc:Transferring archived files from vm05:/var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/crash to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5369/remote/vm05/crash 2026-04-15T14:32:04.049 DEBUG:teuthology.orchestra.run.vm05:> sudo tar c -f - -C /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/crash -- . 2026-04-15T14:32:04.079 INFO:teuthology.orchestra.run.vm05.stderr:tar: /var/lib/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/crash: Cannot open: No such file or directory 2026-04-15T14:32:04.079 INFO:teuthology.orchestra.run.vm05.stderr:tar: Error is not recoverable: exiting now 2026-04-15T14:32:04.080 INFO:tasks.cephadm:Checking cluster log for badness... 2026-04-15T14:32:04.080 DEBUG:teuthology.orchestra.run.vm04:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1 2026-04-15T14:32:04.121 INFO:tasks.cephadm:Compressing logs... 
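The 'Checking cluster log for badness' step above scans ceph.log for ERR/WRN/SEC entries, keeps only CEPHADM_ health codes, and strips the four ignorelisted patterns; anything surviving the chain (capped by head -n 1) would mark the run as failed. The same chain, reflowed for readability:

    # Scan the cluster log; any surviving line means badness.
    log=/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.log
    sudo grep -E '\[ERR\]|\[WRN\]|\[SEC\]' "$log" \
      | grep -E 'CEPHADM_' \
      | grep -Ev '\(MDS_ALL_DOWN\)|\(MDS_UP_LESS_THAN_MAX\)' \
      | grep -Ev 'CEPHADM_DAEMON_PLACE_FAIL|CEPHADM_FAILED_DAEMON' \
      | head -n 1

No output follows the command above, so the check passed.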
2026-04-15T14:32:04.121 DEBUG:teuthology.orchestra.run.vm04:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-04-15T14:32:04.164 DEBUG:teuthology.orchestra.run.vm05:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-04-15T14:32:04.188 INFO:teuthology.orchestra.run.vm04.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-04-15T14:32:04.188 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-04-15T14:32:04.188 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mon.vm04.log 2026-04-15T14:32:04.189 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.log 2026-04-15T14:32:04.189 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.audit.log 2026-04-15T14:32:04.189 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mon.vm04.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mgr.vm04.ycniad.log 2026-04-15T14:32:04.191 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.audit.log: /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.log: 89.9% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.log.gz 2026-04-15T14:32:04.191 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.cephadm.log 2026-04-15T14:32:04.192 INFO:teuthology.orchestra.run.vm05.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-04-15T14:32:04.192 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-04-15T14:32:04.192 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-volume.log 2026-04-15T14:32:04.192 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mgr.vm04.ycniad.log: 91.3% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.audit.log.gz 2026-04-15T14:32:04.193 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.ceph-exporter.vm05.log 2026-04-15T14:32:04.193 INFO:teuthology.orchestra.run.vm05.stderr:gzip 92.0% -5 -- replaced with /var/log/ceph/cephadm.log.gz 2026-04-15T14:32:04.193 INFO:teuthology.orchestra.run.vm05.stderr: --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mgr.vm05.ozgwuj.log 2026-04-15T14:32:04.194 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-volume.log 2026-04-15T14:32:04.194 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.cephadm.log: 83.2% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.cephadm.log.gz 2026-04-15T14:32:04.194 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.ceph-exporter.vm05.log: 
/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mon.vm05.log 2026-04-15T14:32:04.195 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mgr.vm05.ozgwuj.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.audit.log 2026-04-15T14:32:04.195 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mon.vm05.log: 94.5% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.ceph-exporter.vm05.log.gz 2026-04-15T14:32:04.195 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.log 2026-04-15T14:32:04.196 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.ceph-exporter.vm04.log 2026-04-15T14:32:04.198 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.audit.log: 91.5% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.audit.log.gz 2026-04-15T14:32:04.203 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.cephadm.log 2026-04-15T14:32:04.203 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.log: 92.5% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mgr.vm05.ozgwuj.log.gz 2026-04-15T14:32:04.203 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.0.log 2026-04-15T14:32:04.203 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.cephadm.log: 82.6% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.cephadm.log.gz 2026-04-15T14:32:04.203 INFO:teuthology.orchestra.run.vm05.stderr: 90.0% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph.log.gz 2026-04-15T14:32:04.203 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.2.log 2026-04-15T14:32:04.203 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.0.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.4.log 2026-04-15T14:32:04.206 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.1.log 2026-04-15T14:32:04.207 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.ceph-exporter.vm04.log: 94.4% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.ceph-exporter.vm04.log.gz 2026-04-15T14:32:04.208 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.7.log 2026-04-15T14:32:04.209 INFO:teuthology.orchestra.run.vm04.stderr: 96.1% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-volume.log.gz 2026-04-15T14:32:04.209 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.3.log 2026-04-15T14:32:04.212 
INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.1.log: 91.9% -- replaced with /var/log/ceph/cephadm.log.gz 2026-04-15T14:32:04.212 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.4.log: 96.1% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-volume.log.gz 2026-04-15T14:32:04.213 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.5.log 2026-04-15T14:32:04.214 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm05.pzlhsk.log 2026-04-15T14:32:04.220 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.6.log 2026-04-15T14:32:04.225 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm04.owsxoy.log 2026-04-15T14:32:04.225 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.7.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm05.dhvjjs.log 2026-04-15T14:32:04.229 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm05.pzlhsk.log: 91.7% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm05.pzlhsk.log.gz 2026-04-15T14:32:04.231 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm05.pzlhsk.log 2026-04-15T14:32:04.234 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.6.log: gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm04.lqvrsn.log 2026-04-15T14:32:04.236 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm05.dhvjjs.log: 91.9% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm05.dhvjjs.log.gz 2026-04-15T14:32:04.237 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm04.owsxoy.log: 91.5% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm04.owsxoy.log.gz 2026-04-15T14:32:04.240 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm04.lqvrsn.log 2026-04-15T14:32:04.250 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm05.dhvjjs.log 2026-04-15T14:32:04.250 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm05.pzlhsk.log: 93.9% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm05.pzlhsk.log.gz 2026-04-15T14:32:04.250 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm04.lqvrsn.log: 91.8% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-client.rgw.foo.vm04.lqvrsn.log.gz 
2026-04-15T14:32:04.252 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm04.owsxoy.log 2026-04-15T14:32:04.254 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm04.lqvrsn.log: 93.5% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm04.lqvrsn.log.gz 2026-04-15T14:32:04.259 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm04.owsxoy.log: 93.5% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm04.owsxoy.log.gz 2026-04-15T14:32:04.259 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm05.dhvjjs.log: 93.8% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ops-log-ceph-client.rgw.foo.vm05.dhvjjs.log.gz 2026-04-15T14:32:04.345 INFO:teuthology.orchestra.run.vm05.stderr: 92.5% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mon.vm05.log.gz 2026-04-15T14:32:04.574 INFO:teuthology.orchestra.run.vm04.stderr: 89.6% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mgr.vm04.ycniad.log.gz 2026-04-15T14:32:04.699 INFO:teuthology.orchestra.run.vm05.stderr: 93.4% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.2.log.gz 2026-04-15T14:32:04.785 INFO:teuthology.orchestra.run.vm04.stderr: 90.7% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-mon.vm04.log.gz 2026-04-15T14:32:04.873 INFO:teuthology.orchestra.run.vm05.stderr: 93.3% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.0.log.gz 2026-04-15T14:32:05.039 INFO:teuthology.orchestra.run.vm05.stderr: 93.7% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.4.log.gz 2026-04-15T14:32:05.126 INFO:teuthology.orchestra.run.vm04.stderr: 93.1% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.5.log.gz 2026-04-15T14:32:05.128 INFO:teuthology.orchestra.run.vm04.stderr: 93.1% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.6.log.gz 2026-04-15T14:32:05.164 INFO:teuthology.orchestra.run.vm05.stderr: 93.6% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.7.log.gz 2026-04-15T14:32:05.166 INFO:teuthology.orchestra.run.vm05.stderr: 2026-04-15T14:32:05.166 INFO:teuthology.orchestra.run.vm05.stderr:real 0m0.986s 2026-04-15T14:32:05.166 INFO:teuthology.orchestra.run.vm05.stderr:user 0m2.857s 2026-04-15T14:32:05.166 INFO:teuthology.orchestra.run.vm05.stderr:sys 0m0.162s 2026-04-15T14:32:05.230 INFO:teuthology.orchestra.run.vm04.stderr: 93.6% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.3.log.gz 2026-04-15T14:32:05.417 INFO:teuthology.orchestra.run.vm04.stderr: 93.5% -- replaced with /var/log/ceph/d89dc7c6-38d4-11f1-aa58-cd98464f39ae/ceph-osd.1.log.gz 2026-04-15T14:32:05.420 INFO:teuthology.orchestra.run.vm04.stderr: 2026-04-15T14:32:05.420 INFO:teuthology.orchestra.run.vm04.stderr:real 0m1.242s 2026-04-15T14:32:05.420 INFO:teuthology.orchestra.run.vm04.stderr:user 0m3.189s 2026-04-15T14:32:05.420 INFO:teuthology.orchestra.run.vm04.stderr:sys 0m0.155s 2026-04-15T14:32:05.420 INFO:tasks.cephadm:Archiving logs... 
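The out-of-order gzip messages above are expected: the compression pipeline hands one file at a time to gzip but lets xargs run with --max-procs=0 (unbounded parallelism), so the per-file start lines and '-- replaced with --' results interleave. The pipeline, restated with comments (GNU find/xargs/gzip assumed):

    # Compress every *.log under the log trees, one gzip per file,
    # spawning as many parallel gzip processes as xargs allows.
    sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 \
      | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty \
          -- gzip -5 --verbose --

The 'No such file or directory' from find for /var/log/rbd-target-api is harmless here; that path belongs to ceph-iscsi gateways, which this job does not deploy.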
2026-04-15T14:32:05.420 DEBUG:teuthology.misc:Transferring archived files from vm04:/var/log/ceph to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5369/remote/vm04/log 2026-04-15T14:32:05.420 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /var/log/ceph -- . 2026-04-15T14:32:05.661 DEBUG:teuthology.misc:Transferring archived files from vm05:/var/log/ceph to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5369/remote/vm05/log 2026-04-15T14:32:05.661 DEBUG:teuthology.orchestra.run.vm05:> sudo tar c -f - -C /var/log/ceph -- . 2026-04-15T14:32:05.843 INFO:tasks.cephadm:Removing cluster... 2026-04-15T14:32:05.843 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae --force 2026-04-15T14:32:06.005 INFO:teuthology.orchestra.run.vm04.stdout:Deleting cluster with fsid: d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:32:06.079 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid d89dc7c6-38d4-11f1-aa58-cd98464f39ae --force 2026-04-15T14:32:06.242 INFO:teuthology.orchestra.run.vm05.stdout:Deleting cluster with fsid: d89dc7c6-38d4-11f1-aa58-cd98464f39ae 2026-04-15T14:32:06.317 INFO:tasks.cephadm:Removing cephadm ... 2026-04-15T14:32:06.317 DEBUG:teuthology.orchestra.run.vm04:> rm -rf /home/ubuntu/cephtest/cephadm 2026-04-15T14:32:06.338 DEBUG:teuthology.orchestra.run.vm05:> rm -rf /home/ubuntu/cephtest/cephadm 2026-04-15T14:32:06.360 INFO:tasks.cephadm:Teardown complete 2026-04-15T14:32:06.360 DEBUG:teuthology.run_tasks:Unwinding manager nvme_loop 2026-04-15T14:32:06.364 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm04:/dev/vg_nvme/lv_1... 2026-04-15T14:32:06.364 DEBUG:teuthology.orchestra.run.vm04:> sudo nvme disconnect -n lv_1 2026-04-15T14:32:06.512 INFO:teuthology.orchestra.run.vm04.stdout:NQN:lv_1 disconnected 1 controller(s) 2026-04-15T14:32:06.514 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm04:/dev/vg_nvme/lv_2... 2026-04-15T14:32:06.514 DEBUG:teuthology.orchestra.run.vm04:> sudo nvme disconnect -n lv_2 2026-04-15T14:32:06.640 INFO:teuthology.orchestra.run.vm04.stdout:NQN:lv_2 disconnected 1 controller(s) 2026-04-15T14:32:06.642 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm04:/dev/vg_nvme/lv_3... 2026-04-15T14:32:06.642 DEBUG:teuthology.orchestra.run.vm04:> sudo nvme disconnect -n lv_3 2026-04-15T14:32:06.773 INFO:teuthology.orchestra.run.vm04.stdout:NQN:lv_3 disconnected 1 controller(s) 2026-04-15T14:32:06.775 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm04:/dev/vg_nvme/lv_4... 2026-04-15T14:32:06.775 DEBUG:teuthology.orchestra.run.vm04:> sudo nvme disconnect -n lv_4 2026-04-15T14:32:06.889 INFO:teuthology.orchestra.run.vm04.stdout:NQN:lv_4 disconnected 1 controller(s) 2026-04-15T14:32:06.892 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-15T14:32:06.892 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/scratch_devs 2026-04-15T14:32:06.922 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm05:/dev/vg_nvme/lv_1... 2026-04-15T14:32:06.923 DEBUG:teuthology.orchestra.run.vm05:> sudo nvme disconnect -n lv_1 2026-04-15T14:32:07.068 INFO:teuthology.orchestra.run.vm05.stdout:NQN:lv_1 disconnected 1 controller(s) 2026-04-15T14:32:07.070 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm05:/dev/vg_nvme/lv_2... 
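The nvme_loop unwind is symmetric on both hosts: each of the four loopback NVMe subsystems exported from vg_nvme is disconnected by its NQN, as the per-LV commands above and below show. A compact per-host sketch:

    # Disconnect the nvme-loop subsystems backing the scratch devices.
    for lv in lv_1 lv_2 lv_3 lv_4; do
        sudo nvme disconnect -n "$lv"
    done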
2026-04-15T14:32:07.070 DEBUG:teuthology.orchestra.run.vm05:> sudo nvme disconnect -n lv_2 2026-04-15T14:32:07.203 INFO:teuthology.orchestra.run.vm05.stdout:NQN:lv_2 disconnected 1 controller(s) 2026-04-15T14:32:07.204 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm05:/dev/vg_nvme/lv_3... 2026-04-15T14:32:07.204 DEBUG:teuthology.orchestra.run.vm05:> sudo nvme disconnect -n lv_3 2026-04-15T14:32:07.359 INFO:teuthology.orchestra.run.vm05.stdout:NQN:lv_3 disconnected 1 controller(s) 2026-04-15T14:32:07.361 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm05:/dev/vg_nvme/lv_4... 2026-04-15T14:32:07.361 DEBUG:teuthology.orchestra.run.vm05:> sudo nvme disconnect -n lv_4 2026-04-15T14:32:07.500 INFO:teuthology.orchestra.run.vm05.stdout:NQN:lv_4 disconnected 1 controller(s) 2026-04-15T14:32:07.502 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-04-15T14:32:07.502 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/scratch_devs 2026-04-15T14:32:07.531 DEBUG:teuthology.run_tasks:Unwinding manager clock 2026-04-15T14:32:07.541 INFO:teuthology.task.clock:Checking final clock skew... 2026-04-15T14:32:07.541 DEBUG:teuthology.orchestra.run.vm04:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-04-15T14:32:07.544 DEBUG:teuthology.orchestra.run.vm05:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-04-15T14:32:07.558 INFO:teuthology.orchestra.run.vm04.stderr:bash: line 1: ntpq: command not found 2026-04-15T14:32:07.566 INFO:teuthology.orchestra.run.vm04.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-04-15T14:32:07.566 INFO:teuthology.orchestra.run.vm04.stdout:=============================================================================== 2026-04-15T14:32:07.566 INFO:teuthology.orchestra.run.vm04.stdout:^* s7.vonderste.in 2 6 377 12 +708us[ +461us] +/- 19ms 2026-04-15T14:32:07.566 INFO:teuthology.orchestra.run.vm04.stdout:^+ ns.gunnarhofmann.de 2 7 377 14 -139us[ -386us] +/- 42ms 2026-04-15T14:32:07.566 INFO:teuthology.orchestra.run.vm04.stdout:^- 85.215.227.11 2 7 377 12 -5709us[-5709us] +/- 58ms 2026-04-15T14:32:07.566 INFO:teuthology.orchestra.run.vm04.stdout:^+ ntp2.kernfusion.at 2 7 377 74 -1128us[-1375us] +/- 23ms 2026-04-15T14:32:07.595 INFO:teuthology.orchestra.run.vm05.stderr:bash: line 1: ntpq: command not found 2026-04-15T14:32:07.599 INFO:teuthology.orchestra.run.vm05.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-04-15T14:32:07.599 INFO:teuthology.orchestra.run.vm05.stdout:=============================================================================== 2026-04-15T14:32:07.599 INFO:teuthology.orchestra.run.vm05.stdout:^+ ns.gunnarhofmann.de 2 7 377 78 -146us[ -350us] +/- 41ms 2026-04-15T14:32:07.599 INFO:teuthology.orchestra.run.vm05.stdout:^* s7.vonderste.in 2 6 377 12 +678us[ +881us] +/- 19ms 2026-04-15T14:32:07.599 INFO:teuthology.orchestra.run.vm05.stdout:^- 85.215.227.11 2 7 377 15 -5666us[-5462us] +/- 58ms 2026-04-15T14:32:07.599 INFO:teuthology.orchestra.run.vm05.stdout:^+ ntp2.kernfusion.at 2 7 377 79 +276us[ +71us] +/- 23ms 2026-04-15T14:32:07.600 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab 2026-04-15T14:32:07.603 INFO:teuthology.task.ansible:Skipping ansible cleanup... 
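The final clock-skew check above tries ntpq first and falls back to chronyc on hosts, like these CentOS 9 VMs, that ship chrony rather than ntp; the trailing '|| true' keeps a host with neither tool from failing the run:

    # Report time-sync peers with whichever client the host provides.
    PATH=/usr/bin:/usr/sbin ntpq -p \
      || PATH=/usr/bin:/usr/sbin chronyc sources \
      || true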
2026-04-15T14:32:07.603 DEBUG:teuthology.run_tasks:Unwinding manager selinux 2026-04-15T14:32:07.607 DEBUG:teuthology.run_tasks:Unwinding manager pcp 2026-04-15T14:32:07.610 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer 2026-04-15T14:32:07.613 INFO:teuthology.task.internal:Duration was 1437.888514 seconds 2026-04-15T14:32:07.614 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog 2026-04-15T14:32:07.617 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring... 2026-04-15T14:32:07.617 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-04-15T14:32:07.620 DEBUG:teuthology.orchestra.run.vm05:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-04-15T14:32:07.672 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-04-15T14:32:07.696 INFO:teuthology.orchestra.run.vm05.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-04-15T14:32:08.164 INFO:teuthology.task.internal.syslog:Checking logs for errors... 2026-04-15T14:32:08.164 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm04.local 2026-04-15T14:32:08.165 DEBUG:teuthology.orchestra.run.vm04:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-04-15T14:32:08.194 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm05.local 2026-04-15T14:32:08.195 DEBUG:teuthology.orchestra.run.vm05:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-04-15T14:32:08.224 INFO:teuthology.task.internal.syslog:Gathering journalctl... 
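The kern.log scan above follows the same pattern as the cluster-log check: match the broad '\bBUG\b|\bINFO\b|\bDEADLOCK\b' classes, peel away a long list of known-benign messages, and treat any survivor as a failure. A trimmed sketch of that pass/fail shape (filter list abbreviated to two of the exclusions used above):

    # Fail if anything survives the benign-message filters.
    bad=$(grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' kern.log \
            | grep -v CRON | grep -v ceph-crash | head -n 1)
    [ -z "$bad" ] || { echo "kernel log badness: $bad" >&2; exit 1; }

Both hosts produce no output here, so the syslog check passes.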
2026-04-15T14:32:08.225 DEBUG:teuthology.orchestra.run.vm04:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-04-15T14:32:08.237 DEBUG:teuthology.orchestra.run.vm05:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-04-15T14:32:08.899 INFO:teuthology.task.internal.syslog:Compressing syslogs... 2026-04-15T14:32:08.900 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-04-15T14:32:08.901 DEBUG:teuthology.orchestra.run.vm05:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-04-15T14:32:08.929 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-04-15T14:32:08.929 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-04-15T14:32:08.929 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-04-15T14:32:08.929 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-04-15T14:32:08.930 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-04-15T14:32:08.933 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-04-15T14:32:08.934 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-04-15T14:32:08.934 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-04-15T14:32:08.934 INFO:teuthology.orchestra.run.vm05.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-04-15T14:32:08.934 INFO:teuthology.orchestra.run.vm05.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-04-15T14:32:09.083 INFO:teuthology.orchestra.run.vm05.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.2% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-04-15T14:32:09.134 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.9% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-04-15T14:32:09.136 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo 2026-04-15T14:32:09.140 INFO:teuthology.task.internal:Restoring /etc/sudoers... 
2026-04-15T14:32:09.140 DEBUG:teuthology.orchestra.run.vm04:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-04-15T14:32:09.210 DEBUG:teuthology.orchestra.run.vm05:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-04-15T14:32:09.243 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-04-15T14:32:09.247 DEBUG:teuthology.orchestra.run.vm04:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-04-15T14:32:09.252 DEBUG:teuthology.orchestra.run.vm05:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-04-15T14:32:09.279 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = core
2026-04-15T14:32:09.312 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern = core
2026-04-15T14:32:09.327 DEBUG:teuthology.orchestra.run.vm04:> test -e /home/ubuntu/cephtest/archive/coredump
2026-04-15T14:32:09.356 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T14:32:09.357 DEBUG:teuthology.orchestra.run.vm05:> test -e /home/ubuntu/cephtest/archive/coredump
2026-04-15T14:32:09.384 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T14:32:09.384 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-04-15T14:32:09.387 INFO:teuthology.task.internal:Transferring archived files...
2026-04-15T14:32:09.388 DEBUG:teuthology.misc:Transferring archived files from vm04:/home/ubuntu/cephtest/archive to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5369/remote/vm04
2026-04-15T14:32:09.388 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-04-15T14:32:09.435 DEBUG:teuthology.misc:Transferring archived files from vm05:/home/ubuntu/cephtest/archive to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5369/remote/vm05
2026-04-15T14:32:09.435 DEBUG:teuthology.orchestra.run.vm05:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-04-15T14:32:09.469 INFO:teuthology.task.internal:Removing archive directory...
2026-04-15T14:32:09.469 DEBUG:teuthology.orchestra.run.vm04:> rm -rf -- /home/ubuntu/cephtest/archive
2026-04-15T14:32:09.476 DEBUG:teuthology.orchestra.run.vm05:> rm -rf -- /home/ubuntu/cephtest/archive
2026-04-15T14:32:09.527 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-04-15T14:32:09.530 INFO:teuthology.task.internal:Not uploading archives.
2026-04-15T14:32:09.530 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-04-15T14:32:09.533 INFO:teuthology.task.internal:Tidying up after the test...
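The archive transfer above streams a sudo tar of the remote archive directory straight to the teuthology host, so nothing extra is staged on the test node. A hedged sketch of that pattern over plain ssh; teuthology itself runs the remote command through its own connection layer, and the helper name here is illustrative:

    import subprocess

    def pull_directory(host: str, remote_dir: str, local_dir: str) -> None:
        """Stream a remote directory to local_dir as a tar pipe."""
        # Remote side: sudo tar c -f - -C <remote_dir> -- .  (as in the log)
        remote = subprocess.Popen(
            ['ssh', host, 'sudo', 'tar', 'c', '-f', '-', '-C', remote_dir, '--', '.'],
            stdout=subprocess.PIPE,
        )
        # Local side: unpack the stream under the job's archive path.
        subprocess.run(['tar', 'x', '-C', local_dir],
                       stdin=remote.stdout, check=True)
        remote.stdout.close()
        if remote.wait() != 0:
            raise RuntimeError(f'remote tar failed on {host}')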
2026-04-15T14:32:09.533 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-04-15T14:32:09.539 DEBUG:teuthology.orchestra.run.vm05:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-04-15T14:32:09.560 INFO:teuthology.orchestra.run.vm04.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Apr 15 14:32 /home/ubuntu/cephtest
2026-04-15T14:32:09.592 INFO:teuthology.orchestra.run.vm05.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Apr 15 14:32 /home/ubuntu/cephtest
2026-04-15T14:32:09.593 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-04-15T14:32:09.605 INFO:teuthology.run:Summary data:
description: orch:cephadm:smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
duration: 1437.8885135650635
owner: supriti
success: true
2026-04-15T14:32:09.605 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-04-15T14:32:09.650 INFO:teuthology.run:pass
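The find/rmdir pair above is the teardown's leftover check: find -ls logs whatever is still under /home/ubuntu/cephtest, and plain rmdir (unlike rm -rf) only succeeds on an empty directory, so stray files left by a task surface as a teardown failure instead of disappearing silently. The same check in a few lines of Python (the path default is just the one from this run):

    import os
    import sys

    def tidy_up(test_dir: str = '/home/ubuntu/cephtest') -> None:
        """Fail loudly if a task left files behind, then remove the dir."""
        leftovers = os.listdir(test_dir)
        if leftovers:
            sys.exit(f'{test_dir} not empty after teardown: {leftovers}')
        os.rmdir(test_dir)  # like `rmdir --`: only removes an empty directory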