2026-03-10T12:10:42.624 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a 2026-03-10T12:10:42.629 DEBUG:teuthology.report:Pushing job info to http://localhost:8080 2026-03-10T12:10:42.650 INFO:teuthology.run:Config: archive_path: /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025 branch: squid description: orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 1-start 2-services/rgw-ingress 3-final} email: null first_in_suite: false flavor: default job_id: '1025' last_in_suite: false machine_type: vps name: kyr-2026-03-10_01:00:38-orch-squid-none-default-vps no_nested_subset: false openstack: - volumes: count: 4 size: 10 os_type: centos os_version: 9.stream overrides: admin_socket: branch: squid ansible.cephlab: branch: main skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs vars: timezone: UTC ceph: conf: mgr: debug mgr: 20 debug ms: 1 mon: debug mon: 20 debug ms: 1 debug paxos: 20 osd: debug ms: 1 debug osd: 20 osd mclock iops capacity threshold hdd: 49000 osd shutdown pgref assert: true flavor: default log-ignorelist: - \(MDS_ALL_DOWN\) - \(MDS_UP_LESS_THAN_MAX\) - CEPHADM_DAEMON_PLACE_FAIL - CEPHADM_FAILED_DAEMON log-only-match: - CEPHADM_ sha1: e911bdebe5c8faa3800735d1568fcdca65db60df ceph-deploy: conf: client: log file: /var/log/ceph/ceph-$name.$pid.log mon: {} install: ceph: flavor: default sha1: e911bdebe5c8faa3800735d1568fcdca65db60df extra_system_packages: deb: - python3-xmltodict - python3-jmespath rpm: - bzip2 - perl-Test-Harness - python3-xmltodict - python3-jmespath selinux: allowlist: - scontext=system_u:system_r:logrotate_t:s0 - scontext=system_u:system_r:getty_t:s0 workunit: branch: tt-squid sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b owner: kyr priority: 1000 repo: https://github.com/ceph/ceph.git roles: - - host.a - client.0 - - host.b - client.1 seed: 8043 sha1: e911bdebe5c8faa3800735d1568fcdca65db60df sleep_before_teardown: 0 subset: 1/64 suite: orch suite_branch: tt-squid suite_path: /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa suite_relpath: qa suite_repo: https://github.com/kshtsk/ceph.git suite_sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b targets: vm03.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMZBW/a3CoI91R8GJnSEm6upw+hMr2hA+7dj1Yd6joMmD2bO/tmP8P/JLjwCM7Qy0xUSjLquNLtqi98aPGxQbwc= vm09.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPux4MtNg3cfCpG0be0ghM/Vr9Uq9SmNFXywU4VaZXj9T+CI7OFFXkK6jyynBiKpz3MHJWAHNgicH0cYyLwc0Ms= tasks: - pexec: all: - sudo dnf remove nvme-cli -y - sudo dnf install nvmetcli nvme-cli -y - cephadm: roleless: true - cephadm.shell: host.a: - ceph orch status - ceph orch ps - ceph orch ls - ceph orch host ls - ceph orch device ls - vip: null - cephadm.shell: host.a: - ceph orch device ls --refresh - cephadm.apply: specs: - placement: count: 4 host_pattern: '*' service_id: foo service_type: rgw spec: rgw_frontend_port: 8000 - placement: count: 2 service_id: rgw.foo service_type: ingress spec: backend_service: rgw.foo frontend_port: 9000 monitor_port: 9001 virtual_ip: '{{VIP0}}/{{VIPPREFIXLEN}}' - cephadm.wait_for_service: service: rgw.foo - cephadm.wait_for_service: service: ingress.rgw.foo - cephadm.shell: host.a: - "echo \"Check while healthy...\"\ncurl http://{{VIP0}}:9000/\n\n# stop each\ \ rgw in turn\necho \"Check with each rgw stopped in turn...\"\nfor rgw in `ceph\ \ orch ps | grep ^rgw.foo. 
| awk '{print $1}'`; do\n ceph orch daemon stop\ \ $rgw\n timeout 300 bash -c \"while ! ceph orch ps | grep $rgw | grep stopped;\ \ do echo 'Waiting for $rgw to stop'; ceph orch ps --daemon-type rgw; ceph health\ \ detail; sleep 5 ; done\"\n timeout 300 bash -c \"while ! curl http://{{VIP0}}:9000/\ \ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done\"\ \n ceph orch daemon start $rgw\n timeout 300 bash -c \"while ! ceph orch ps\ \ | grep $rgw | grep running; do echo 'Waiting for $rgw to start'; ceph orch\ \ ps --daemon-type rgw; ceph health detail; sleep 5 ; done\"\ndone\n\n# stop\ \ each haproxy in turn\necho \"Check with each haproxy down in turn...\"\nfor\ \ haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do\n\ \ ceph orch daemon stop $haproxy\n timeout 300 bash -c \"while ! ceph orch\ \ ps | grep $haproxy | grep stopped; do echo 'Waiting for $haproxy to stop';\ \ ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done\"\n\ \ timeout 300 bash -c \"while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting\ \ for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done\"\n ceph orch\ \ daemon start $haproxy\n timeout 300 bash -c \"while ! ceph orch ps | grep\ \ $haproxy | grep running; do echo 'Waiting for $haproxy to start'; ceph orch\ \ ps --daemon-type haproxy; ceph health detail; sleep 5 ; done\"\ndone\n\ntimeout\ \ 300 bash -c \"while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/\ \ to be available'; sleep 1 ; done\"\n" - cephadm.shell: host.a: - stat -c '%u %g' /var/log/ceph | grep '167 167' - ceph orch status - ceph orch ps - ceph orch ls - ceph orch host ls - ceph orch device ls - ceph orch ls | grep '^osd.all-available-devices ' teuthology: fragments_dropped: [] meta: {} postmerge: [] teuthology_branch: clyso-debian-13 teuthology_repo: https://github.com/clyso/teuthology teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444 timestamp: 2026-03-10_01:00:38 tube: vps user: kyr verbose: false worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473 2026-03-10T12:10:42.650 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa; will attempt to use it 2026-03-10T12:10:42.651 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa/tasks 2026-03-10T12:10:42.651 INFO:teuthology.run_tasks:Running task internal.check_packages... 2026-03-10T12:10:42.651 INFO:teuthology.task.internal:Checking packages... 2026-03-10T12:10:42.651 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df' 2026-03-10T12:10:42.651 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch 2026-03-10T12:10:42.651 INFO:teuthology.packaging:ref: None 2026-03-10T12:10:42.651 INFO:teuthology.packaging:tag: None 2026-03-10T12:10:42.651 INFO:teuthology.packaging:branch: squid 2026-03-10T12:10:42.651 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T12:10:42.651 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid 2026-03-10T12:10:43.456 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb 2026-03-10T12:10:43.457 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep... 
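For readability, the escaped cephadm.shell script in the 2-services/rgw-ingress fragment above unfolds to roughly the following (whitespace reconstructed from the YAML; the 'Waiting for ...' echo lines are trimmed and the haproxy.rgw.foo loop, which mirrors the rgw loop, is omitted):

    echo "Check while healthy..."
    curl http://{{VIP0}}:9000/

    # stop each rgw in turn and make sure the ingress VIP keeps answering
    echo "Check with each rgw stopped in turn..."
    for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
      ceph orch daemon stop $rgw
      # wait up to 300s for the daemon to report stopped
      timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do ceph orch ps --daemon-type rgw; ceph health detail; sleep 5; done"
      # the VIP served by the ingress service must still respond with one rgw down
      timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/; do sleep 1; done"
      ceph orch daemon start $rgw
      # wait up to 300s for the daemon to come back to running
      timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do ceph orch ps --daemon-type rgw; ceph health detail; sleep 5; done"
    done

The same stop/probe/start pattern is then repeated for each haproxy.rgw.foo daemon, followed by a final curl against http://{{VIP0}}:9000/ to confirm the VIP is still serving.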
2026-03-10T12:10:43.458 INFO:teuthology.task.internal:no buildpackages task found 2026-03-10T12:10:43.458 INFO:teuthology.run_tasks:Running task internal.save_config... 2026-03-10T12:10:43.458 INFO:teuthology.task.internal:Saving configuration 2026-03-10T12:10:43.464 INFO:teuthology.run_tasks:Running task internal.check_lock... 2026-03-10T12:10:43.464 INFO:teuthology.task.internal.check_lock:Checking locks... 2026-03-10T12:10:43.471 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm03.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 12:09:28.736237', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:03', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMZBW/a3CoI91R8GJnSEm6upw+hMr2hA+7dj1Yd6joMmD2bO/tmP8P/JLjwCM7Qy0xUSjLquNLtqi98aPGxQbwc='} 2026-03-10T12:10:43.477 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm09.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 12:09:28.736627', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:09', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPux4MtNg3cfCpG0be0ghM/Vr9Uq9SmNFXywU4VaZXj9T+CI7OFFXkK6jyynBiKpz3MHJWAHNgicH0cYyLwc0Ms='} 2026-03-10T12:10:43.477 INFO:teuthology.run_tasks:Running task internal.add_remotes... 2026-03-10T12:10:43.478 INFO:teuthology.task.internal:roles: ubuntu@vm03.local - ['host.a', 'client.0'] 2026-03-10T12:10:43.478 INFO:teuthology.task.internal:roles: ubuntu@vm09.local - ['host.b', 'client.1'] 2026-03-10T12:10:43.478 INFO:teuthology.run_tasks:Running task console_log... 2026-03-10T12:10:43.485 DEBUG:teuthology.task.console_log:vm03 does not support IPMI; excluding 2026-03-10T12:10:43.491 DEBUG:teuthology.task.console_log:vm09 does not support IPMI; excluding 2026-03-10T12:10:43.491 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fd2a651a290>, signals=[15]) 2026-03-10T12:10:43.491 INFO:teuthology.run_tasks:Running task internal.connect... 2026-03-10T12:10:43.492 INFO:teuthology.task.internal:Opening connections... 
2026-03-10T12:10:43.492 DEBUG:teuthology.task.internal:connecting to ubuntu@vm03.local 2026-03-10T12:10:43.493 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-10T12:10:43.551 DEBUG:teuthology.task.internal:connecting to ubuntu@vm09.local 2026-03-10T12:10:43.552 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-10T12:10:43.610 INFO:teuthology.run_tasks:Running task internal.push_inventory... 2026-03-10T12:10:43.611 DEBUG:teuthology.orchestra.run.vm03:> uname -m 2026-03-10T12:10:43.662 INFO:teuthology.orchestra.run.vm03.stdout:x86_64 2026-03-10T12:10:43.662 DEBUG:teuthology.orchestra.run.vm03:> cat /etc/os-release 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:NAME="CentOS Stream" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:VERSION="9" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:ID="centos" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:ID_LIKE="rhel fedora" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:VERSION_ID="9" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:PLATFORM_ID="platform:el9" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:ANSI_COLOR="0;31" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:LOGO="fedora-logo-icon" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:HOME_URL="https://centos.org/" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-10T12:10:43.718 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-10T12:10:43.719 INFO:teuthology.lock.ops:Updating vm03.local on lock server 2026-03-10T12:10:43.723 DEBUG:teuthology.orchestra.run.vm09:> uname -m 2026-03-10T12:10:43.738 INFO:teuthology.orchestra.run.vm09.stdout:x86_64 2026-03-10T12:10:43.738 DEBUG:teuthology.orchestra.run.vm09:> cat /etc/os-release 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:NAME="CentOS Stream" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:VERSION="9" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:ID="centos" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:ID_LIKE="rhel fedora" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_ID="9" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:PLATFORM_ID="platform:el9" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:ANSI_COLOR="0;31" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:LOGO="fedora-logo-icon" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:HOME_URL="https://centos.org/" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat 
Enterprise Linux 9" 2026-03-10T12:10:43.792 INFO:teuthology.orchestra.run.vm09.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-10T12:10:43.792 INFO:teuthology.lock.ops:Updating vm09.local on lock server 2026-03-10T12:10:43.796 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles... 2026-03-10T12:10:43.798 INFO:teuthology.run_tasks:Running task internal.check_conflict... 2026-03-10T12:10:43.799 INFO:teuthology.task.internal:Checking for old test directory... 2026-03-10T12:10:43.799 DEBUG:teuthology.orchestra.run.vm03:> test '!' -e /home/ubuntu/cephtest 2026-03-10T12:10:43.801 DEBUG:teuthology.orchestra.run.vm09:> test '!' -e /home/ubuntu/cephtest 2026-03-10T12:10:43.846 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 2026-03-10T12:10:43.846 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 2026-03-10T12:10:43.847 DEBUG:teuthology.orchestra.run.vm03:> test -z $(ls -A /var/lib/ceph) 2026-03-10T12:10:43.856 DEBUG:teuthology.orchestra.run.vm09:> test -z $(ls -A /var/lib/ceph) 2026-03-10T12:10:43.869 INFO:teuthology.orchestra.run.vm03.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-10T12:10:43.900 INFO:teuthology.orchestra.run.vm09.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-10T12:10:43.901 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-03-10T12:10:43.908 DEBUG:teuthology.orchestra.run.vm03:> test -e /ceph-qa-ready 2026-03-10T12:10:43.923 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T12:10:44.121 DEBUG:teuthology.orchestra.run.vm09:> test -e /ceph-qa-ready 2026-03-10T12:10:44.135 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T12:10:44.386 INFO:teuthology.run_tasks:Running task internal.base... 2026-03-10T12:10:44.388 INFO:teuthology.task.internal:Creating test directory... 2026-03-10T12:10:44.388 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-10T12:10:44.389 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-10T12:10:44.404 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-03-10T12:10:44.405 INFO:teuthology.run_tasks:Running task internal.archive... 2026-03-10T12:10:44.406 INFO:teuthology.task.internal:Creating archive directory... 2026-03-10T12:10:44.406 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-10T12:10:44.444 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-10T12:10:44.461 INFO:teuthology.run_tasks:Running task internal.coredump... 2026-03-10T12:10:44.462 INFO:teuthology.task.internal:Enabling coredump saving... 
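A note on the internal.check_ceph_data step above: the command substitution in test -z $(ls -A /var/lib/ceph) is unquoted, so when the directory is missing ls only writes the error seen on stderr while the substitution expands to nothing, leaving test with the single argument -z, which it treats as a non-empty string and returns success; the 'cannot access' lines are therefore expected and do not fail the task. A minimal illustration with a hypothetical path, not taken from the run:

    # missing directory: ls fails to stderr, the substitution is empty, test exits 0
    test -z $(ls -A /nonexistent) && echo "treated as empty"
    # directory with contents: the substitution is non-empty, test exits non-zero
    mkdir -p /tmp/demo && touch /tmp/demo/file
    test -z $(ls -A /tmp/demo) || echo "not empty"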
2026-03-10T12:10:44.463 DEBUG:teuthology.orchestra.run.vm03:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-10T12:10:44.513 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T12:10:44.513 DEBUG:teuthology.orchestra.run.vm09:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-10T12:10:44.527 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T12:10:44.528 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-10T12:10:44.556 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-10T12:10:44.579 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-10T12:10:44.588 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-10T12:10:44.591 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-10T12:10:44.600 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-10T12:10:44.632 INFO:teuthology.run_tasks:Running task internal.sudo... 2026-03-10T12:10:44.637 INFO:teuthology.task.internal:Configuring sudo... 2026-03-10T12:10:44.637 DEBUG:teuthology.orchestra.run.vm03:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-10T12:10:44.638 DEBUG:teuthology.orchestra.run.vm09:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-10T12:10:44.695 INFO:teuthology.run_tasks:Running task internal.syslog... 2026-03-10T12:10:44.697 INFO:teuthology.task.internal.syslog:Starting syslog monitoring... 
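In the kernel.core_pattern value installed above, %t expands to the dump time in seconds since the epoch and %p to the PID of the dumping process, so any core produced during the run is written under the per-job archive as <unixtime>.<pid>.core. A quick way to confirm the active pattern on a node (an illustrative check, not part of the job):

    # show the pattern the coredump step just applied
    sysctl kernel.core_pattern
    cat /proc/sys/kernel/core_pattern
    # cores, if any, accumulate here and are collected with the archive
    ls -l /home/ubuntu/cephtest/archive/coredump/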
2026-03-10T12:10:44.697 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-10T12:10:44.702 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-10T12:10:44.752 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-10T12:10:44.780 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-10T12:10:44.834 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T12:10:44.834 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-10T12:10:44.893 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-10T12:10:44.915 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-10T12:10:44.970 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-10T12:10:44.970 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-10T12:10:45.029 DEBUG:teuthology.orchestra.run.vm03:> sudo service rsyslog restart 2026-03-10T12:10:45.031 DEBUG:teuthology.orchestra.run.vm09:> sudo service rsyslog restart 2026-03-10T12:10:45.056 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-10T12:10:45.096 INFO:teuthology.orchestra.run.vm09.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-10T12:10:45.413 INFO:teuthology.run_tasks:Running task internal.timer... 2026-03-10T12:10:45.415 INFO:teuthology.task.internal:Starting timer... 2026-03-10T12:10:45.415 INFO:teuthology.run_tasks:Running task pcp... 2026-03-10T12:10:45.418 INFO:teuthology.run_tasks:Running task selinux... 2026-03-10T12:10:45.420 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']} 2026-03-10T12:10:45.420 INFO:teuthology.task.selinux:Excluding vm03: VMs are not yet supported 2026-03-10T12:10:45.420 INFO:teuthology.task.selinux:Excluding vm09: VMs are not yet supported 2026-03-10T12:10:45.420 DEBUG:teuthology.task.selinux:Getting current SELinux state 2026-03-10T12:10:45.420 DEBUG:teuthology.task.selinux:Existing SELinux modes: {} 2026-03-10T12:10:45.420 INFO:teuthology.task.selinux:Putting SELinux into permissive mode 2026-03-10T12:10:45.420 INFO:teuthology.run_tasks:Running task ansible.cephlab... 
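The contents of /etc/rsyslog.d/80-cephtest.conf are piped to the sudo dd commands over stdin and are not echoed in the log. Given the kern.log and misc.log files created just above, the drop-in presumably steers kernel messages into the first file and everything else into the second, roughly along these lines (the selectors are an assumption; only the file paths come from the run):

    sudo tee /etc/rsyslog.d/80-cephtest.conf <<'EOF'
    kern.*          /home/ubuntu/cephtest/archive/syslog/kern.log
    *.*;kern.none   /home/ubuntu/cephtest/archive/syslog/misc.log
    EOF
    sudo systemctl restart rsyslog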
2026-03-10T12:10:45.421 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}} 2026-03-10T12:10:45.422 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git 2026-03-10T12:10:45.423 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin 2026-03-10T12:10:46.041 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main 2026-03-10T12:10:46.046 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}] 2026-03-10T12:10:46.047 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventoryqxc_9u9q --limit vm03.local,vm09.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs 2026-03-10T12:12:44.263 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm03.local'), Remote(name='ubuntu@vm09.local')] 2026-03-10T12:12:44.264 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm03.local' 2026-03-10T12:12:44.264 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-10T12:12:44.329 DEBUG:teuthology.orchestra.run.vm03:> true 2026-03-10T12:12:44.409 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm03.local' 2026-03-10T12:12:44.409 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm09.local' 2026-03-10T12:12:44.409 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-10T12:12:44.475 DEBUG:teuthology.orchestra.run.vm09:> true 2026-03-10T12:12:44.547 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm09.local' 2026-03-10T12:12:44.548 INFO:teuthology.run_tasks:Running task clock... 2026-03-10T12:12:44.550 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew... 
2026-03-10T12:12:44.550 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-10T12:12:44.551 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-10T12:12:44.553 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-10T12:12:44.553 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-10T12:12:44.581 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-10T12:12:44.596 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-10T12:12:44.622 INFO:teuthology.orchestra.run.vm09.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-10T12:12:44.622 INFO:teuthology.orchestra.run.vm03.stderr:sudo: ntpd: command not found 2026-03-10T12:12:44.634 INFO:teuthology.orchestra.run.vm03.stdout:506 Cannot talk to daemon 2026-03-10T12:12:44.637 INFO:teuthology.orchestra.run.vm09.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-10T12:12:44.649 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-10T12:12:44.664 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-10T12:12:44.666 INFO:teuthology.orchestra.run.vm09.stderr:sudo: ntpd: command not found 2026-03-10T12:12:44.679 INFO:teuthology.orchestra.run.vm09.stdout:506 Cannot talk to daemon 2026-03-10T12:12:44.698 INFO:teuthology.orchestra.run.vm09.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-10T12:12:44.708 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found 2026-03-10T12:12:44.710 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-10T12:12:44.710 INFO:teuthology.orchestra.run.vm03.stdout:=============================================================================== 2026-03-10T12:12:44.718 INFO:teuthology.orchestra.run.vm09.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-10T12:12:44.767 INFO:teuthology.orchestra.run.vm09.stderr:bash: line 1: ntpq: command not found 2026-03-10T12:12:44.769 INFO:teuthology.orchestra.run.vm09.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-10T12:12:44.769 INFO:teuthology.orchestra.run.vm09.stdout:=============================================================================== 2026-03-10T12:12:44.770 INFO:teuthology.run_tasks:Running task pexec... 2026-03-10T12:12:44.772 INFO:teuthology.task.pexec:Executing custom commands... 
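The clock task's one-liner above probes the ntp, ntpd and chronyd families in turn; on these CentOS 9 Stream hosts only chronyd is installed, which accounts for the 'Unit ... not loaded/not found' and 'ntpd: command not found' messages, the '506 Cannot talk to daemon' from chronyc makestep (chronyd had just been stopped by the preceding command), and the empty chronyc sources table printed once chronyd is running again. Condensed and annotated, the same command reads:

    sudo systemctl stop ntp.service \
      || sudo systemctl stop ntpd.service \
      || sudo systemctl stop chronyd.service   # only chronyd exists, so this branch is the one that runs
    sudo ntpd -gq || sudo chronyc makestep     # ntpd is absent; makestep answers "506 Cannot talk to daemon"
                                               # because chronyd was stopped just above
    sudo systemctl start ntp.service \
      || sudo systemctl start ntpd.service \
      || sudo systemctl start chronyd.service  # chronyd comes back up
    PATH=/usr/bin:/usr/sbin ntpq -p \
      || PATH=/usr/bin:/usr/sbin chronyc sources \
      || true                                  # ntpq is absent; chronyc sources shows no sources yet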
2026-03-10T12:12:44.772 DEBUG:teuthology.orchestra.run.vm03:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-10T12:12:44.772 DEBUG:teuthology.orchestra.run.vm09:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-10T12:12:44.774 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo dnf remove nvme-cli -y 2026-03-10T12:12:44.774 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo dnf install nvmetcli nvme-cli -y 2026-03-10T12:12:44.774 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm03.local 2026-03-10T12:12:44.774 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-10T12:12:44.774 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y 2026-03-10T12:12:44.813 DEBUG:teuthology.task.pexec:ubuntu@vm09.local< sudo dnf remove nvme-cli -y 2026-03-10T12:12:44.813 DEBUG:teuthology.task.pexec:ubuntu@vm09.local< sudo dnf install nvmetcli nvme-cli -y 2026-03-10T12:12:44.813 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm09.local 2026-03-10T12:12:44.813 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-10T12:12:44.813 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y 2026-03-10T12:12:44.961 INFO:teuthology.orchestra.run.vm03.stdout:No match for argument: nvme-cli 2026-03-10T12:12:44.961 INFO:teuthology.orchestra.run.vm03.stderr:No packages marked for removal. 2026-03-10T12:12:44.964 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved. 2026-03-10T12:12:44.965 INFO:teuthology.orchestra.run.vm03.stdout:Nothing to do. 2026-03-10T12:12:44.965 INFO:teuthology.orchestra.run.vm03.stdout:Complete! 2026-03-10T12:12:45.038 INFO:teuthology.orchestra.run.vm09.stdout:No match for argument: nvme-cli 2026-03-10T12:12:45.038 INFO:teuthology.orchestra.run.vm09.stderr:No packages marked for removal. 2026-03-10T12:12:45.041 INFO:teuthology.orchestra.run.vm09.stdout:Dependencies resolved. 2026-03-10T12:12:45.041 INFO:teuthology.orchestra.run.vm09.stdout:Nothing to do. 2026-03-10T12:12:45.041 INFO:teuthology.orchestra.run.vm09.stdout:Complete! 2026-03-10T12:12:45.332 INFO:teuthology.orchestra.run.vm03.stdout:Last metadata expiration check: 0:01:26 ago on Tue 10 Mar 2026 12:11:19 PM UTC. 2026-03-10T12:12:45.429 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved. 
2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================ 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout: Package Architecture Version Repository Size 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================ 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout:Installing: 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout:Installing dependencies: 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout:Transaction Summary 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================ 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout:Install 6 Packages 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout:Total download size: 2.3 M 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout:Installed size: 11 M 2026-03-10T12:12:45.430 INFO:teuthology.orchestra.run.vm03.stdout:Downloading Packages: 2026-03-10T12:12:45.495 INFO:teuthology.orchestra.run.vm09.stdout:Last metadata expiration check: 0:01:06 ago on Tue 10 Mar 2026 12:11:39 PM UTC. 2026-03-10T12:12:45.609 INFO:teuthology.orchestra.run.vm09.stdout:Dependencies resolved. 
2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================ 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout: Package Architecture Version Repository Size 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================ 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout:Installing: 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout:Installing dependencies: 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout:Transaction Summary 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================ 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout:Install 6 Packages 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout:Total download size: 2.3 M 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout:Installed size: 11 M 2026-03-10T12:12:45.610 INFO:teuthology.orchestra.run.vm09.stdout:Downloading Packages: 2026-03-10T12:12:45.669 INFO:teuthology.orchestra.run.vm03.stdout:[MIRROR] nvmetcli-0.8-3.el9.noarch.rpm: Status code: 404 for https://mirror.karneval.cz/pub/linux/centos-stream/9-stream/BaseOS/x86_64/os/Packages/nvmetcli-0.8-3.el9.noarch.rpm (IP: 89.102.0.150) 2026-03-10T12:12:45.671 INFO:teuthology.orchestra.run.vm03.stdout:[MIRROR] python3-configshell-1.1.30-1.el9.noarch.rpm: Status code: 404 for https://mirror.karneval.cz/pub/linux/centos-stream/9-stream/BaseOS/x86_64/os/Packages/python3-configshell-1.1.30-1.el9.noarch.rpm (IP: 89.102.0.150) 2026-03-10T12:12:45.674 INFO:teuthology.orchestra.run.vm03.stdout:[MIRROR] nvme-cli-2.16-1.el9.x86_64.rpm: Status code: 404 for https://mirror.karneval.cz/pub/linux/centos-stream/9-stream/BaseOS/x86_64/os/Packages/nvme-cli-2.16-1.el9.x86_64.rpm (IP: 89.102.0.150) 2026-03-10T12:12:45.782 INFO:teuthology.orchestra.run.vm03.stdout:(1/6): python3-configshell-1.1.30-1.el9.noarch. 
289 kB/s | 72 kB 00:00 2026-03-10T12:12:45.783 INFO:teuthology.orchestra.run.vm03.stdout:(2/6): nvmetcli-0.8-3.el9.noarch.rpm 175 kB/s | 44 kB 00:00 2026-03-10T12:12:45.811 INFO:teuthology.orchestra.run.vm03.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 2.8 MB/s | 84 kB 00:00 2026-03-10T12:12:45.845 INFO:teuthology.orchestra.run.vm03.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.4 MB/s | 150 kB 00:00 2026-03-10T12:12:45.900 INFO:teuthology.orchestra.run.vm03.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 3.2 MB/s | 1.2 MB 00:00 2026-03-10T12:12:45.903 INFO:teuthology.orchestra.run.vm03.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 9.0 MB/s | 837 kB 00:00 2026-03-10T12:12:45.904 INFO:teuthology.orchestra.run.vm03.stdout:-------------------------------------------------------------------------------- 2026-03-10T12:12:45.904 INFO:teuthology.orchestra.run.vm03.stdout:Total 4.9 MB/s | 2.3 MB 00:00 2026-03-10T12:12:45.959 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction check 2026-03-10T12:12:45.967 INFO:teuthology.orchestra.run.vm03.stdout:Transaction check succeeded. 2026-03-10T12:12:45.967 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction test 2026-03-10T12:12:46.012 INFO:teuthology.orchestra.run.vm03.stdout:Transaction test succeeded. 2026-03-10T12:12:46.012 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction 2026-03-10T12:12:46.152 INFO:teuthology.orchestra.run.vm03.stdout: Preparing : 1/1 2026-03-10T12:12:46.163 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6 2026-03-10T12:12:46.173 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6 2026-03-10T12:12:46.180 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-10T12:12:46.188 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-10T12:12:46.189 INFO:teuthology.orchestra.run.vm03.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6 2026-03-10T12:12:46.338 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6 2026-03-10T12:12:46.342 INFO:teuthology.orchestra.run.vm03.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-10T12:12:46.597 INFO:teuthology.orchestra.run.vm09.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 84 kB/s | 44 kB 00:00 2026-03-10T12:12:46.633 INFO:teuthology.orchestra.run.vm09.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 128 kB/s | 72 kB 00:00 2026-03-10T12:12:46.681 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-10T12:12:46.681 INFO:teuthology.orchestra.run.vm03.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 
2026-03-10T12:12:46.681 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:12:46.762 INFO:teuthology.orchestra.run.vm09.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 509 kB/s | 84 kB 00:00 2026-03-10T12:12:46.837 INFO:teuthology.orchestra.run.vm09.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 742 kB/s | 150 kB 00:00 2026-03-10T12:12:46.867 INFO:teuthology.orchestra.run.vm09.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 1.5 MB/s | 1.2 MB 00:00 2026-03-10T12:12:47.037 INFO:teuthology.orchestra.run.vm09.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 3.0 MB/s | 837 kB 00:00 2026-03-10T12:12:47.037 INFO:teuthology.orchestra.run.vm09.stdout:-------------------------------------------------------------------------------- 2026-03-10T12:12:47.037 INFO:teuthology.orchestra.run.vm09.stdout:Total 1.6 MB/s | 2.3 MB 00:01 2026-03-10T12:12:47.109 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction check 2026-03-10T12:12:47.120 INFO:teuthology.orchestra.run.vm09.stdout:Transaction check succeeded. 2026-03-10T12:12:47.120 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction test 2026-03-10T12:12:47.183 INFO:teuthology.orchestra.run.vm09.stdout:Transaction test succeeded. 2026-03-10T12:12:47.196 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction 2026-03-10T12:12:47.268 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6 2026-03-10T12:12:47.268 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6 2026-03-10T12:12:47.268 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-10T12:12:47.268 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-10T12:12:47.268 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6 2026-03-10T12:12:47.373 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6 2026-03-10T12:12:47.373 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:12:47.373 INFO:teuthology.orchestra.run.vm03.stdout:Installed: 2026-03-10T12:12:47.373 INFO:teuthology.orchestra.run.vm03.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-10T12:12:47.373 INFO:teuthology.orchestra.run.vm03.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-10T12:12:47.373 INFO:teuthology.orchestra.run.vm03.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-10T12:12:47.373 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:12:47.373 INFO:teuthology.orchestra.run.vm03.stdout:Complete! 
2026-03-10T12:12:47.378 INFO:teuthology.orchestra.run.vm09.stdout: Preparing : 1/1 2026-03-10T12:12:47.392 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6 2026-03-10T12:12:47.405 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6 2026-03-10T12:12:47.416 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-10T12:12:47.423 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-10T12:12:47.425 INFO:teuthology.orchestra.run.vm09.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6 2026-03-10T12:12:47.443 DEBUG:teuthology.parallel:result is None 2026-03-10T12:12:47.606 INFO:teuthology.orchestra.run.vm09.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6 2026-03-10T12:12:47.610 INFO:teuthology.orchestra.run.vm09.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-10T12:12:47.977 INFO:teuthology.orchestra.run.vm09.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-10T12:12:47.977 INFO:teuthology.orchestra.run.vm09.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 2026-03-10T12:12:47.977 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:12:48.544 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6 2026-03-10T12:12:48.544 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6 2026-03-10T12:12:48.544 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-10T12:12:48.544 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-10T12:12:48.544 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6 2026-03-10T12:12:48.636 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6 2026-03-10T12:12:48.636 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:12:48.636 INFO:teuthology.orchestra.run.vm09.stdout:Installed: 2026-03-10T12:12:48.636 INFO:teuthology.orchestra.run.vm09.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-10T12:12:48.636 INFO:teuthology.orchestra.run.vm09.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-10T12:12:48.636 INFO:teuthology.orchestra.run.vm09.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-10T12:12:48.636 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:12:48.636 INFO:teuthology.orchestra.run.vm09.stdout:Complete! 2026-03-10T12:12:48.701 DEBUG:teuthology.parallel:result is None 2026-03-10T12:12:48.701 INFO:teuthology.run_tasks:Running task cephadm... 
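Both hosts leave the pexec step with nvme-cli and nvmetcli installed; the earlier 'No match for argument: nvme-cli' from the remove pass is expected, since the package was not present on the fresh images. If needed, the result could be spot-checked on a node with something like (illustrative, not part of the job):

    rpm -q nvme-cli nvmetcli
    nvme version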
2026-03-10T12:12:48.746 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_DAEMON_PLACE_FAIL', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'} 2026-03-10T12:12:48.746 INFO:tasks.cephadm:Cluster image is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T12:12:48.746 INFO:tasks.cephadm:Cluster fsid is 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:12:48.746 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-03-10T12:12:48.746 INFO:tasks.cephadm:No mon roles; fabricating mons 2026-03-10T12:12:48.746 INFO:tasks.cephadm:Monitor IPs: {'mon.vm03': '192.168.123.103', 'mon.vm09': '192.168.123.109'} 2026-03-10T12:12:48.746 INFO:tasks.cephadm:Normalizing hostnames... 2026-03-10T12:12:48.746 DEBUG:teuthology.orchestra.run.vm03:> sudo hostname $(hostname -s) 2026-03-10T12:12:48.783 DEBUG:teuthology.orchestra.run.vm09:> sudo hostname $(hostname -s) 2026-03-10T12:12:48.827 INFO:tasks.cephadm:Downloading "compiled" cephadm from cachra 2026-03-10T12:12:48.827 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T12:12:49.457 INFO:tasks.cephadm:builder_project result: [{'url': 'https://3.chacra.ceph.com/r/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/flavors/default/', 'chacra_url': 'https://3.chacra.ceph.com/repos/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/flavors/default/', 'ref': 'squid', 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df', 'distro': 'centos', 'distro_version': '9', 'distro_codename': None, 'modified': '2026-02-25 18:55:15.146628', 'status': 'ready', 'flavor': 'default', 'project': 'ceph', 'archs': ['source', 'x86_64'], 'extra': {'version': '19.2.3-678-ge911bdeb', 'package_manager_version': '19.2.3-678.ge911bdeb', 'build_url': 'https://jenkins.ceph.com/job/ceph-dev-pipeline/3275/', 'root_build_cause': '', 'node_name': '10.20.192.26+soko16', 'job_name': 'ceph-dev-pipeline'}}] 2026-03-10T12:12:50.113 INFO:tasks.util.chacra:got chacra host 3.chacra.ceph.com, ref squid, sha1 e911bdebe5c8faa3800735d1568fcdca65db60df from https://shaman.ceph.com/api/search/?project=ceph&distros=centos%2F9%2Fx86_64&flavor=default&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T12:12:50.114 INFO:tasks.cephadm:Discovered cachra url: https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm 2026-03-10T12:12:50.114 INFO:tasks.cephadm:Downloading cephadm from url: https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm 2026-03-10T12:12:50.114 DEBUG:teuthology.orchestra.run.vm03:> curl --silent -L https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-10T12:12:51.719 INFO:teuthology.orchestra.run.vm03.stdout:-rw-r--r--. 
1 ubuntu ubuntu 788355 Mar 10 12:12 /home/ubuntu/cephtest/cephadm 2026-03-10T12:12:51.719 DEBUG:teuthology.orchestra.run.vm09:> curl --silent -L https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-10T12:12:53.228 INFO:teuthology.orchestra.run.vm09.stdout:-rw-r--r--. 1 ubuntu ubuntu 788355 Mar 10 12:12 /home/ubuntu/cephtest/cephadm 2026-03-10T12:12:53.228 DEBUG:teuthology.orchestra.run.vm03:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-10T12:12:53.246 DEBUG:teuthology.orchestra.run.vm09:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-10T12:12:53.273 INFO:tasks.cephadm:Pulling image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on all hosts... 2026-03-10T12:12:53.273 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull 2026-03-10T12:12:53.288 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull 2026-03-10T12:12:53.462 INFO:teuthology.orchestra.run.vm03.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-10T12:12:53.534 INFO:teuthology.orchestra.run.vm09.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-10T12:13:29.211 INFO:teuthology.orchestra.run.vm03.stdout:{ 2026-03-10T12:13:29.211 INFO:teuthology.orchestra.run.vm03.stdout: "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)", 2026-03-10T12:13:29.211 INFO:teuthology.orchestra.run.vm03.stdout: "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", 2026-03-10T12:13:29.211 INFO:teuthology.orchestra.run.vm03.stdout: "repo_digests": [ 2026-03-10T12:13:29.211 INFO:teuthology.orchestra.run.vm03.stdout: "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc" 2026-03-10T12:13:29.211 INFO:teuthology.orchestra.run.vm03.stdout: ] 2026-03-10T12:13:29.211 INFO:teuthology.orchestra.run.vm03.stdout:} 2026-03-10T12:13:46.735 INFO:teuthology.orchestra.run.vm09.stdout:{ 2026-03-10T12:13:46.735 INFO:teuthology.orchestra.run.vm09.stdout: "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)", 2026-03-10T12:13:46.735 INFO:teuthology.orchestra.run.vm09.stdout: "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", 2026-03-10T12:13:46.735 INFO:teuthology.orchestra.run.vm09.stdout: "repo_digests": [ 2026-03-10T12:13:46.735 INFO:teuthology.orchestra.run.vm09.stdout: "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc" 2026-03-10T12:13:46.735 INFO:teuthology.orchestra.run.vm09.stdout: ] 2026-03-10T12:13:46.735 INFO:teuthology.orchestra.run.vm09.stdout:} 2026-03-10T12:13:46.749 DEBUG:teuthology.orchestra.run.vm03:> sudo mkdir -p /etc/ceph 2026-03-10T12:13:46.775 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /etc/ceph 2026-03-10T12:13:46.800 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 777 /etc/ceph 2026-03-10T12:13:46.838 
DEBUG:teuthology.orchestra.run.vm09:> sudo chmod 777 /etc/ceph 2026-03-10T12:13:46.864 INFO:tasks.cephadm:Writing seed config... 2026-03-10T12:13:46.864 INFO:tasks.cephadm: override: [mgr] debug mgr = 20 2026-03-10T12:13:46.864 INFO:tasks.cephadm: override: [mgr] debug ms = 1 2026-03-10T12:13:46.864 INFO:tasks.cephadm: override: [mon] debug mon = 20 2026-03-10T12:13:46.864 INFO:tasks.cephadm: override: [mon] debug ms = 1 2026-03-10T12:13:46.864 INFO:tasks.cephadm: override: [mon] debug paxos = 20 2026-03-10T12:13:46.864 INFO:tasks.cephadm: override: [osd] debug ms = 1 2026-03-10T12:13:46.864 INFO:tasks.cephadm: override: [osd] debug osd = 20 2026-03-10T12:13:46.864 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000 2026-03-10T12:13:46.864 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True 2026-03-10T12:13:46.864 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T12:13:46.864 DEBUG:teuthology.orchestra.run.vm03:> dd of=/home/ubuntu/cephtest/seed.ceph.conf 2026-03-10T12:13:46.893 DEBUG:tasks.cephadm:Final config: [global] # make logging friendly to teuthology log_to_file = true log_to_stderr = false log to journald = false mon cluster log to file = true mon cluster log file level = debug mon clock drift allowed = 1.000 # replicate across OSDs, not hosts osd crush chooseleaf type = 0 #osd pool default size = 2 osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd # enable some debugging auth debug = true ms die on old message = true ms die on bug = true debug asserts on shutdown = true # adjust warnings mon max pg per osd = 10000# >= luminous mon pg warn max object skew = 0 mon osd allow primary affinity = true mon osd allow pg remap = true mon warn on legacy crush tunables = false mon warn on crush straw calc version zero = false mon warn on no sortbitwise = false mon warn on osd down out interval zero = false mon warn on too few osds = false mon_warn_on_pool_pg_num_not_power_of_two = false # disable pg_autoscaler by default for new pools osd_pool_default_pg_autoscale_mode = off # tests delete pools mon allow pool delete = true fsid = 7444ff0e-1c7a-11f1-9305-473e10361f26 [osd] osd scrub load threshold = 5.0 osd scrub max interval = 600 osd mclock profile = high_recovery_ops osd recover clone overlap = true osd recovery max chunk = 1048576 osd deep scrub update digest min age = 30 osd map max advance = 10 osd memory target autotune = true # debugging osd debug shutdown = true osd debug op order = true osd debug verify stray on activate = true osd debug pg log writeout = true osd debug verify cached snaps = true osd debug verify missing on start = true osd debug misdirected ops = true osd op queue = debug_random osd op queue cut off = debug_random osd shutdown pgref assert = True bdev debug aio = true osd sloppy crc = true debug ms = 1 debug osd = 20 osd mclock iops capacity threshold hdd = 49000 [mgr] mon reweight min pgs per osd = 4 mon reweight min bytes per osd = 10 mgr/telemetry/nag = false debug mgr = 20 debug ms = 1 [mon] mon data avail warn = 5 mon mgr mkfs grace = 240 mon reweight min pgs per osd = 4 mon osd reporter subtree level = osd mon osd prime pg temp = true mon reweight min bytes per osd = 10 # rotate auth tickets quickly to exercise renewal paths auth mon ticket ttl = 660# 11m auth service ticket ttl = 240# 4m # don't complain about global id reclaim mon_warn_on_insecure_global_id_reclaim = false mon_warn_on_insecure_global_id_reclaim_allowed = false debug mon = 
20 debug ms = 1 debug paxos = 20 [client.rgw] rgw cache enabled = true rgw enable ops log = true rgw enable usage log = true 2026-03-10T12:13:46.893 DEBUG:teuthology.orchestra.run.vm03:mon.vm03> sudo journalctl -f -n 0 -u ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm03.service 2026-03-10T12:13:46.935 INFO:tasks.cephadm:Bootstrapping... 2026-03-10T12:13:46.935 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df -v bootstrap --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.103 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring 2026-03-10T12:13:47.078 INFO:teuthology.orchestra.run.vm03.stdout:-------------------------------------------------------------------------------- 2026-03-10T12:13:47.079 INFO:teuthology.orchestra.run.vm03.stdout:cephadm ['--image', 'quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df', '-v', 'bootstrap', '--fsid', '7444ff0e-1c7a-11f1-9305-473e10361f26', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.103', '--skip-admin-label'] 2026-03-10T12:13:47.079 INFO:teuthology.orchestra.run.vm03.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts. 2026-03-10T12:13:47.079 INFO:teuthology.orchestra.run.vm03.stdout:Verifying podman|docker is present... 2026-03-10T12:13:47.100 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 5.8.0 2026-03-10T12:13:47.100 INFO:teuthology.orchestra.run.vm03.stdout:Verifying lvm2 is present... 2026-03-10T12:13:47.100 INFO:teuthology.orchestra.run.vm03.stdout:Verifying time synchronization is in place... 2026-03-10T12:13:47.107 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-10T12:13:47.107 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-10T12:13:47.112 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-10T12:13:47.112 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout inactive 2026-03-10T12:13:47.117 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout enabled 2026-03-10T12:13:47.122 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout active 2026-03-10T12:13:47.123 INFO:teuthology.orchestra.run.vm03.stdout:Unit chronyd.service is enabled and running 2026-03-10T12:13:47.123 INFO:teuthology.orchestra.run.vm03.stdout:Repeating the final host check... 
2026-03-10T12:13:47.141 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 5.8.0 2026-03-10T12:13:47.141 INFO:teuthology.orchestra.run.vm03.stdout:podman (/bin/podman) version 5.8.0 is present 2026-03-10T12:13:47.141 INFO:teuthology.orchestra.run.vm03.stdout:systemctl is present 2026-03-10T12:13:47.141 INFO:teuthology.orchestra.run.vm03.stdout:lvcreate is present 2026-03-10T12:13:47.147 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-10T12:13:47.147 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-10T12:13:47.152 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-10T12:13:47.152 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout inactive 2026-03-10T12:13:47.157 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout enabled 2026-03-10T12:13:47.161 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout active 2026-03-10T12:13:47.162 INFO:teuthology.orchestra.run.vm03.stdout:Unit chronyd.service is enabled and running 2026-03-10T12:13:47.162 INFO:teuthology.orchestra.run.vm03.stdout:Host looks OK 2026-03-10T12:13:47.162 INFO:teuthology.orchestra.run.vm03.stdout:Cluster fsid: 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:13:47.162 INFO:teuthology.orchestra.run.vm03.stdout:Acquiring lock 140372491834992 on /run/cephadm/7444ff0e-1c7a-11f1-9305-473e10361f26.lock 2026-03-10T12:13:47.162 INFO:teuthology.orchestra.run.vm03.stdout:Lock 140372491834992 acquired on /run/cephadm/7444ff0e-1c7a-11f1-9305-473e10361f26.lock 2026-03-10T12:13:47.162 INFO:teuthology.orchestra.run.vm03.stdout:Verifying IP 192.168.123.103 port 3300 ... 2026-03-10T12:13:47.162 INFO:teuthology.orchestra.run.vm03.stdout:Verifying IP 192.168.123.103 port 6789 ... 
2026-03-10T12:13:47.162 INFO:teuthology.orchestra.run.vm03.stdout:Base mon IP(s) is [192.168.123.103:3300, 192.168.123.103:6789], mon addrv is [v2:192.168.123.103:3300,v1:192.168.123.103:6789] 2026-03-10T12:13:47.165 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.103 metric 100 2026-03-10T12:13:47.165 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.103 metric 100 2026-03-10T12:13:47.167 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium 2026-03-10T12:13:47.167 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-03-10T12:13:47.169 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-03-10T12:13:47.169 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout inet6 ::1/128 scope host 2026-03-10T12:13:47.169 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-10T12:13:47.169 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000 2026-03-10T12:13:47.169 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:3/64 scope link noprefixroute 2026-03-10T12:13:47.169 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-10T12:13:47.169 INFO:teuthology.orchestra.run.vm03.stdout:Mon IP `192.168.123.103` is in CIDR network `192.168.123.0/24` 2026-03-10T12:13:47.169 INFO:teuthology.orchestra.run.vm03.stdout:Mon IP `192.168.123.103` is in CIDR network `192.168.123.0/24` 2026-03-10T12:13:47.169 INFO:teuthology.orchestra.run.vm03.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24'] 2026-03-10T12:13:47.170 INFO:teuthology.orchestra.run.vm03.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-03-10T12:13:47.170 INFO:teuthology.orchestra.run.vm03.stdout:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-10T12:13:48.405 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 2026-03-10T12:13:48.405 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Trying to pull quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 
2026-03-10T12:13:48.405 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Getting image source signatures 2026-03-10T12:13:48.405 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying blob sha256:1752b8d01aa0dd33bbe0ab24e8316174c94fbdcd5d26252e2680bba0624747a7 2026-03-10T12:13:48.405 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying blob sha256:8e380faede39ebd4286247457b408d979ab568aafd8389c42ec304b8cfba4e92 2026-03-10T12:13:48.405 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying config sha256:654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 2026-03-10T12:13:48.405 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Writing manifest to image destination 2026-03-10T12:13:48.540 INFO:teuthology.orchestra.run.vm03.stdout:ceph: stdout ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable) 2026-03-10T12:13:48.541 INFO:teuthology.orchestra.run.vm03.stdout:Ceph version: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable) 2026-03-10T12:13:48.541 INFO:teuthology.orchestra.run.vm03.stdout:Extracting ceph user uid/gid from container image... 2026-03-10T12:13:48.633 INFO:teuthology.orchestra.run.vm03.stdout:stat: stdout 167 167 2026-03-10T12:13:48.633 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial keys... 2026-03-10T12:13:48.725 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQD8CrBpXLIiKRAAu0fOt7N+nf+s2wX71fhFoA== 2026-03-10T12:13:48.800 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQD8CrBp0uG8LhAAoeFcjYbB8XLyNVJB42NAbw== 2026-03-10T12:13:48.899 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQD8CrBpC2aCMxAAOxYMpw7IwPLo9R/v2e6yfg== 2026-03-10T12:13:48.900 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial monmap... 2026-03-10T12:13:49.019 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-10T12:13:49.019 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy 2026-03-10T12:13:49.019 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:13:49.019 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-10T12:13:49.019 INFO:teuthology.orchestra.run.vm03.stdout:monmaptool for vm03 [v2:192.168.123.103:3300,v1:192.168.123.103:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-10T12:13:49.019 INFO:teuthology.orchestra.run.vm03.stdout:setting min_mon_release = quincy 2026-03-10T12:13:49.019 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: set fsid to 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:13:49.019 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-10T12:13:49.019 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:13:49.019 INFO:teuthology.orchestra.run.vm03.stdout:Creating mon... 2026-03-10T12:13:49.146 INFO:teuthology.orchestra.run.vm03.stdout:create mon.vm03 on 2026-03-10T12:13:49.399 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 
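Note: the monmaptool lines above show cephadm writing an epoch-0 monmap for the single bootstrap monitor, setting the cluster fsid and min_mon_release = quincy. For orientation only, a roughly equivalent manual invocation would look like the sketch below; flag spellings can vary between Ceph releases, so treat this as illustrative and confirm with monmaptool --help:

    monmaptool --create --clobber \
        --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 \
        --addv vm03 '[v2:192.168.123.103:3300,v1:192.168.123.103:6789]' \
        /tmp/monmap
    monmaptool --print /tmp/monmap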
2026-03-10T12:13:49.520 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-7444ff0e-1c7a-11f1-9305-473e10361f26.target → /etc/systemd/system/ceph-7444ff0e-1c7a-11f1-9305-473e10361f26.target. 2026-03-10T12:13:49.520 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-7444ff0e-1c7a-11f1-9305-473e10361f26.target → /etc/systemd/system/ceph-7444ff0e-1c7a-11f1-9305-473e10361f26.target. 2026-03-10T12:13:49.663 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm03 2026-03-10T12:13:49.663 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to reset failed state of unit ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm03.service: Unit ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm03.service not loaded. 2026-03-10T12:13:49.793 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-7444ff0e-1c7a-11f1-9305-473e10361f26.target.wants/ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm03.service → /etc/systemd/system/ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@.service. 2026-03-10T12:13:49.944 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-10T12:13:49.944 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to enable service . firewalld.service is not available 2026-03-10T12:13:49.944 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mon to start... 2026-03-10T12:13:49.944 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mon... 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout cluster: 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout id: 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout health: HEALTH_OK 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout services: 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum vm03 (age 0.123783s) 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr: no daemons active 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout data: 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout pgs: 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:mon is available 2026-03-10T12:13:50.140 INFO:teuthology.orchestra.run.vm03.stdout:Assimilating anything we can from ceph.conf... 
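Note: once the mon reports HEALTH_OK, bootstrap moves on to "Assimilating anything we can from ceph.conf", that is, importing the seed ceph.conf options into the monitors' centralized configuration database; the [global]/[mgr]/[osd] dump in the next chunk is the result of that import. As a sketch, the equivalent admin-side commands would be roughly:

    ceph config assimilate-conf -i /etc/ceph/ceph.conf   # import an existing conf file
    ceph config dump                                     # inspect what was stored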
2026-03-10T12:13:50.304 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:50.304 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [global] 2026-03-10T12:13:50.304 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout fsid = 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:13:50.304 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-10T12:13:50.304 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.103:3300,v1:192.168.123.103:6789] 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [osd] 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-10T12:13:50.305 INFO:teuthology.orchestra.run.vm03.stdout:Generating new minimal ceph.conf... 2026-03-10T12:13:50.467 INFO:teuthology.orchestra.run.vm03.stdout:Restarting the monitor... 
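Note: after assimilation, cephadm writes a new minimal ceph.conf (essentially just fsid and mon_host, since everything else now lives in the config database) and restarts the monitor so it runs with the centralized settings. A sketch of what "Generating new minimal ceph.conf..." amounts to, assuming a working admin keyring on the host:

    ceph config generate-minimal-conf | sudo tee /etc/ceph/ceph.conf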
2026-03-10T12:13:50.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm03[46812]: 2026-03-10T12:13:50.536+0000 7ff67d541640 -1 mon.vm03@0(leader) e1 *** Got Signal Terminated *** 2026-03-10T12:13:50.990 INFO:teuthology.orchestra.run.vm03.stdout:Setting public_network to 192.168.123.0/24 in mon config section 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 podman[47020]: 2026-03-10 12:13:50.792644862 +0000 UTC m=+0.268185506 container died 32a78d80b08d5c807474bee1b920b46a491dfc317591c823e2d71bb75d6026e4 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm03, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 podman[47020]: 2026-03-10 12:13:50.815456421 +0000 UTC m=+0.290997065 container remove 32a78d80b08d5c807474bee1b920b46a491dfc317591c823e2d71bb75d6026e4 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm03, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, ceph=True, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0) 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 bash[47020]: ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm03 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 systemd[1]: ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm03.service: Deactivated successfully. 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 systemd[1]: Stopped Ceph mon.vm03 for 7444ff0e-1c7a-11f1-9305-473e10361f26. 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 systemd[1]: Starting Ceph mon.vm03 for 7444ff0e-1c7a-11f1-9305-473e10361f26... 
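Note: the "Got Signal Terminated" line and the podman "container died" / "container remove" events above are the expected effect of that monitor restart (systemd stops the old mon container and starts a fresh one), not a crash. The "Setting public_network to 192.168.123.0/24 in mon config section" step corresponds, roughly, to a centralized config write such as:

    ceph config set mon public_network 192.168.123.0/24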
2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 podman[47091]: 2026-03-10 12:13:50.947245776 +0000 UTC m=+0.014400125 container create e2f02b4b230519a40354ecfaaae0b3d6e05a431b0d2c50c64516237e436f464e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm03, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 podman[47091]: 2026-03-10 12:13:50.976977119 +0000 UTC m=+0.044131468 container init e2f02b4b230519a40354ecfaaae0b3d6e05a431b0d2c50c64516237e436f464e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm03, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, ceph=True, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 podman[47091]: 2026-03-10 12:13:50.980157332 +0000 UTC m=+0.047311681 container start e2f02b4b230519a40354ecfaaae0b3d6e05a431b0d2c50c64516237e436f464e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm03, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS) 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 bash[47091]: e2f02b4b230519a40354ecfaaae0b3d6e05a431b0d2c50c64516237e436f464e 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 podman[47091]: 2026-03-10 12:13:50.941592094 +0000 UTC m=+0.008746453 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 
12:13:50 vm03 systemd[1]: Started Ceph mon.vm03 for 7444ff0e-1c7a-11f1-9305-473e10361f26. 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: pidfile_write: ignore empty --pid-file 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: load: jerasure load: lrc 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: RocksDB version: 7.9.2 2026-03-10T12:13:51.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Git sha 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: DB SUMMARY 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: DB Session ID: SX64T3TU7ZZG66SA93HH 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: CURRENT file: CURRENT 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm03/store.db dir, Total Num: 1, files: 000008.sst 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm03/store.db: 000009.log size: 75099 ; 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.error_if_exists: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.create_if_missing: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.paranoid_checks: 1 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.env: 0x564c5493ddc0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.fs: PosixFileSystem 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.info_log: 0x564c56086b20 
2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_file_opening_threads: 16 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.statistics: (nil) 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.use_fsync: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_log_file_size: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.allow_fallocate: 1 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.use_direct_reads: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.db_log_dir: 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.wal_dir: 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T12:13:51.054 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.write_buffer_manager: 0x564c5608b900 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.unordered_write: 0 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T12:13:51.054 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.row_cache: None 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.wal_filter: None 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.two_write_queues: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.wal_compression: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.atomic_flush: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T12:13:51.055 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.log_readahead_size: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_background_jobs: 2 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_background_compactions: -1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_subcompactions: 1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_open_files: -1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: 
Options.strict_bytes_per_sync: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_background_flushes: -1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Compression algorithms supported: 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: kZSTD supported: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: kXpressCompression supported: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: kBZip2Compression supported: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: kLZ4Compression supported: 1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: kZlibCompression supported: 1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: kSnappyCompression supported: 1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm03/store.db/MANIFEST-000010 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.merge_operator: 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compaction_filter: None 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: table_factory options: 
flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x564c560866e0) 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout: cache_index_and_filter_blocks: 1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout: pin_top_level_index_and_filter: 1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout: index_type: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout: data_block_index_type: 0 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout: index_shortening: 1 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout: checksum: 4 2026-03-10T12:13:51.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout: no_block_cache: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: block_cache: 0x564c560ab350 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: block_cache_name: BinnedLRUCache 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: block_cache_options: 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: capacity : 536870912 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: num_shard_bits : 4 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: strict_capacity_limit : 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: high_pri_pool_ratio: 0.000 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: block_cache_compressed: (nil) 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: persistent_cache: (nil) 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: block_size: 4096 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: block_size_deviation: 10 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: block_restart_interval: 16 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: index_block_restart_interval: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: metadata_block_size: 4096 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: partition_filters: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: use_delta_encoding: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: filter_policy: bloomfilter 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: whole_key_filtering: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: verify_compression: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: read_amp_bytes_per_bit: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: format_version: 5 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: enable_index_compression: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: block_align: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: max_auto_readahead_size: 262144 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: prepopulate_block_cache: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: initial_auto_readahead_size: 8192 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout: num_file_reads_for_auto_readahead: 2 
2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compression: NoCompression 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bottommost_compression: Disabled 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.num_levels: 7 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T12:13:51.056 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T12:13:51.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T12:13:51.057 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:50 vm03 ceph-mon[47106]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.arena_block_size: 1048576 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.inplace_update_support: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T12:13:51.057 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.bloom_locality: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.max_successive_merges: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.ttl: 2592000 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.enable_blob_files: false 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.min_blob_size: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.blob_file_starting_level: 0 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm03/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5 
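Note: the long run of rocksdb "Options.*" lines comes from the restarted monitor opening its key-value store at /var/lib/ceph/mon/ceph-vm03/store.db; values such as write_buffer_size: 33554432, compression: NoCompression and level_compaction_dynamic_level_bytes: 1 are the monitor's RocksDB tuning and are informational rather than a sign of trouble. If needed, the effective setting can be inspected on a live cluster with something like the sketch below (option name as commonly spelled; verify it against the documentation for this release):

    ceph config get mon mon_rocksdb_options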
2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 093658c9-20b9-49cc-b8d8-a246fa31ebaf 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773144831002546, "job": 1, "event": "recovery_started", "wal_files": [9]} 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773144831007904, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 72167, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 223, "table_properties": {"data_size": 70446, "index_size": 174, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 517, "raw_key_size": 9562, "raw_average_key_size": 49, "raw_value_size": 65071, "raw_average_value_size": 335, "num_data_blocks": 8, "num_entries": 194, "num_filter_entries": 194, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773144831, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "093658c9-20b9-49cc-b8d8-a246fa31ebaf", "db_session_id": "SX64T3TU7ZZG66SA93HH", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}} 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773144831007969, "job": 1, "event": "recovery_finished"} 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: [db/version_set.cc:5047] Creating manifest 15 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm03/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x564c560ace00 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: DB pointer 0x564c561c6000 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-10T12:13:51.057 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ** DB Stats ** 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T12:13:51.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout: 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ** Compaction Stats [default] ** 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: L0 2/0 72.35 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 13.3 0.01 0.00 1 0.005 0 0 0.0 0.0 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Sum 2/0 72.35 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 13.3 0.01 0.00 1 0.005 0 0 0.0 0.0 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 13.3 0.01 0.00 1 0.005 0 0 0.0 0.0 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ** Compaction Stats [default] ** 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 13.3 0.01 0.00 1 0.005 0 0 0.0 0.0 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Uptime(secs): 0.0 total, 0.0 interval 
2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Cumulative compaction: 0.00 GB write, 6.18 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Interval compaction: 0.00 GB write, 6.18 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Block cache BinnedLRUCache@0x564c560ab350#2 capacity: 512.00 MB usage: 1.06 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 8e-06 secs_since: 0 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Block cache entry stats(count,size,portion): FilterBlock(2,0.70 KB,0.00013411%) IndexBlock(2,0.36 KB,6.85453e-05%) Misc(1,0.00 KB,0%) 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: starting mon.vm03 rank 0 at public addrs [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] at bind addrs [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon_data /var/lib/ceph/mon/ceph-vm03 fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???) 
e1 preinit fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).mds e0 Unable to load 'last_metadata' 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).mds e0 Unable to load 'last_metadata' 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).mds e1 new map 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).mds e1 print_map 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: e1 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: btime 2026-03-10T12:13:49:971376+0000 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: legacy client fscid: -1 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout: No filesystems configured 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).mgr e0 loading version 1 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).mgr e1 active server: (0) 2026-03-10T12:13:51.058 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03@-1(???).mgr e1 mkfs or daemon transitioned to available, loading commands 2026-03-10T12:13:51.173 INFO:teuthology.orchestra.run.vm03.stdout:Wrote config to /etc/ceph/ceph.conf 2026-03-10T12:13:51.174 INFO:teuthology.orchestra.run.vm03.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-10T12:13:51.174 INFO:teuthology.orchestra.run.vm03.stdout:Creating mgr... 2026-03-10T12:13:51.175 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:9283 ... 2026-03-10T12:13:51.175 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:8765 ... 
2026-03-10T12:13:51.175 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:8443 ... 2026-03-10T12:13:51.317 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mgr.vm03.oxmxtj 2026-03-10T12:13:51.317 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to reset failed state of unit ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mgr.vm03.oxmxtj.service: Unit ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mgr.vm03.oxmxtj.service not loaded. 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mon.vm03 is new leader, mons vm03 in quorum (ranks 0) 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: monmap epoch 1 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: last_changed 2026-03-10T12:13:48.965537+0000 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: created 2026-03-10T12:13:48.965537+0000 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: min_mon_release 19 (squid) 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: election_strategy: 1 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.vm03 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: fsmap 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: osdmap e1: 0 total, 0 up, 0 in 2026-03-10T12:13:51.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:51 vm03 ceph-mon[47106]: mgrmap e1: no daemons active 2026-03-10T12:13:51.445 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-7444ff0e-1c7a-11f1-9305-473e10361f26.target.wants/ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mgr.vm03.oxmxtj.service → /etc/systemd/system/ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@.service. 2026-03-10T12:13:51.584 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-10T12:13:51.585 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to enable service . firewalld.service is not available 2026-03-10T12:13:51.585 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-10T12:13:51.585 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available 2026-03-10T12:13:51.585 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr to start... 2026-03-10T12:13:51.585 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr... 
2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "7444ff0e-1c7a-11f1-9305-473e10361f26", 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": { 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "vm03" 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 0, 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-10T12:13:51.778 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 
"num_objects": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-10T12:13:49:971376+0000", 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful" 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-10T12:13:49.973371+0000", 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-10T12:13:51.779 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (1/15)... 2026-03-10T12:13:52.388 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:52 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/4021797723' entity='client.admin' 2026-03-10T12:13:52.388 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:52 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/1070720680' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T12:13:53.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:53.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-10T12:13:53.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "7444ff0e-1c7a-11f1-9305-473e10361f26", 2026-03-10T12:13:53.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": { 2026-03-10T12:13:53.996 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-10T12:13:53.996 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-10T12:13:53.997 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-10T12:13:53.997 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:53.997 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-10T12:13:53.997 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-10T12:13:53.997 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0 2026-03-10T12:13:53.997 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "vm03" 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 2, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: 
stdout "num_pools": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-10T12:13:49:971376+0000", 2026-03-10T12:13:53.998 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful" 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-10T12:13:49.973371+0000", 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-10T12:13:53.999 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (2/15)... 2026-03-10T12:13:54.258 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:53 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/920923954' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: Activating manager daemon vm03.oxmxtj 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: mgrmap e2: vm03.oxmxtj(active, starting, since 0.00376507s) 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: from='mgr.14100 192.168.123.103:0/681273620' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: from='mgr.14100 192.168.123.103:0/681273620' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: from='mgr.14100 192.168.123.103:0/681273620' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: from='mgr.14100 192.168.123.103:0/681273620' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: from='mgr.14100 192.168.123.103:0/681273620' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr metadata", "who": "vm03.oxmxtj", "id": "vm03.oxmxtj"}]: dispatch 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: Manager daemon vm03.oxmxtj is now available 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: from='mgr.14100 192.168.123.103:0/681273620' entity='mgr.vm03.oxmxtj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.oxmxtj/mirror_snapshot_schedule"}]: dispatch 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: from='mgr.14100 192.168.123.103:0/681273620' entity='mgr.vm03.oxmxtj' 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: from='mgr.14100 192.168.123.103:0/681273620' entity='mgr.vm03.oxmxtj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.oxmxtj/trash_purge_schedule"}]: dispatch 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: from='mgr.14100 192.168.123.103:0/681273620' entity='mgr.vm03.oxmxtj' 2026-03-10T12:13:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:54 vm03 ceph-mon[47106]: from='mgr.14100 192.168.123.103:0/681273620' entity='mgr.vm03.oxmxtj' 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "7444ff0e-1c7a-11f1-9305-473e10361f26", 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": { 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-10T12:13:56.270 
INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "vm03" 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 5, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-10T12:13:56.270 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:56.271 
INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-10T12:13:49:971376+0000", 2026-03-10T12:13:56.271 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful" 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-10T12:13:49.973371+0000", 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-10T12:13:56.272 INFO:teuthology.orchestra.run.vm03.stdout:mgr is available 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [global] 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout fsid = 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.103:3300,v1:192.168.123.103:6789] 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: 
stdout mgr/telemetry/nag = false 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [osd] 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-10T12:13:56.514 INFO:teuthology.orchestra.run.vm03.stdout:Enabling cephadm module... 2026-03-10T12:13:56.553 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:56 vm03 ceph-mon[47106]: mgrmap e3: vm03.oxmxtj(active, since 1.00793s) 2026-03-10T12:13:56.553 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:56 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3408646098' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T12:13:56.553 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:56 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/1929250840' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-10T12:13:58.041 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:57 vm03 ceph-mon[47106]: mgrmap e4: vm03.oxmxtj(active, since 2s) 2026-03-10T12:13:58.041 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:57 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/474024341' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-10T12:13:58.311 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-10T12:13:58.311 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 5, 2026-03-10T12:13:58.311 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-10T12:13:58.311 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "active_name": "vm03.oxmxtj", 2026-03-10T12:13:58.311 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-10T12:13:58.311 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-10T12:13:58.311 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for the mgr to restart... 2026-03-10T12:13:58.311 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr epoch 5... 2026-03-10T12:13:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:58 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/474024341' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-10T12:13:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:58 vm03 ceph-mon[47106]: mgrmap e5: vm03.oxmxtj(active, since 3s) 2026-03-10T12:13:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:13:58 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/2619209260' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: Active manager daemon vm03.oxmxtj restarted 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: Activating manager daemon vm03.oxmxtj 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: osdmap e2: 0 total, 0 up, 0 in 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: mgrmap e6: vm03.oxmxtj(active, starting, since 0.0911163s) 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr metadata", "who": "vm03.oxmxtj", "id": "vm03.oxmxtj"}]: dispatch 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: Manager daemon vm03.oxmxtj is now available 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.oxmxtj/mirror_snapshot_schedule"}]: dispatch 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:14:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:00 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.oxmxtj/trash_purge_schedule"}]: dispatch 2026-03-10T12:14:01.908 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-10T12:14:01.908 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 
"mgrmap_epoch": 7, 2026-03-10T12:14:01.908 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-10T12:14:01.908 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-10T12:14:01.908 INFO:teuthology.orchestra.run.vm03.stdout:mgr epoch 5 is available 2026-03-10T12:14:01.908 INFO:teuthology.orchestra.run.vm03.stdout:Setting orchestrator backend to cephadm... 2026-03-10T12:14:02.381 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:02 vm03 ceph-mon[47106]: Found migration_current of "None". Setting to last migration. 2026-03-10T12:14:02.381 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:02 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:02.381 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:02 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:02.381 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:02 vm03 ceph-mon[47106]: mgrmap e7: vm03.oxmxtj(active, since 1.09411s) 2026-03-10T12:14:02.382 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:02 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:14:02.382 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:02 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:02.382 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:02 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:14:02.384 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout value unchanged 2026-03-10T12:14:02.384 INFO:teuthology.orchestra.run.vm03.stdout:Generating ssh key... 2026-03-10T12:14:02.902 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINIv6I20QuSyUAgDrvGFkaGDyNPaU9Npk2opvbBR1yiB ceph-7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:14:02.902 INFO:teuthology.orchestra.run.vm03.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-10T12:14:02.902 INFO:teuthology.orchestra.run.vm03.stdout:Adding key to root@localhost authorized_keys... 2026-03-10T12:14:02.903 INFO:teuthology.orchestra.run.vm03.stdout:Adding host vm03... 
2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:01] ENGINE Bus STARTING 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: from='client.14122 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: from='client.14122 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:01] ENGINE Serving on https://192.168.123.103:7150 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:01] ENGINE Client ('192.168.123.103', 56608) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:02] ENGINE Serving on http://192.168.123.103:8765 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:02] ENGINE Bus STARTED 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: from='client.14130 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: Generating ssh key... 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:03.735 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:03 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:04.634 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Added host 'vm03' with addr '192.168.123.103' 2026-03-10T12:14:04.634 INFO:teuthology.orchestra.run.vm03.stdout:Deploying mon service with default placement... 
2026-03-10T12:14:04.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:04 vm03 ceph-mon[47106]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:04.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:04 vm03 ceph-mon[47106]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm03", "addr": "192.168.123.103", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:04.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:04 vm03 ceph-mon[47106]: Deploying cephadm binary to vm03 2026-03-10T12:14:04.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:04 vm03 ceph-mon[47106]: mgrmap e8: vm03.oxmxtj(active, since 2s) 2026-03-10T12:14:04.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:04 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:04.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:04 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:14:04.904 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled mon update... 2026-03-10T12:14:04.904 INFO:teuthology.orchestra.run.vm03.stdout:Deploying mgr service with default placement... 2026-03-10T12:14:05.150 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled mgr update... 2026-03-10T12:14:05.151 INFO:teuthology.orchestra.run.vm03.stdout:Deploying crash service with default placement... 2026-03-10T12:14:05.386 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled crash update... 2026-03-10T12:14:05.386 INFO:teuthology.orchestra.run.vm03.stdout:Deploying ceph-exporter service with default placement... 2026-03-10T12:14:05.639 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:05 vm03 ceph-mon[47106]: Added host vm03 2026-03-10T12:14:05.640 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:05 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:05.640 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:05 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:05.640 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:05 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:05.640 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:05 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:05.658 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update... 2026-03-10T12:14:05.658 INFO:teuthology.orchestra.run.vm03.stdout:Deploying prometheus service with default placement... 2026-03-10T12:14:05.896 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled prometheus update... 2026-03-10T12:14:05.896 INFO:teuthology.orchestra.run.vm03.stdout:Deploying grafana service with default placement... 2026-03-10T12:14:06.350 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled grafana update... 2026-03-10T12:14:06.350 INFO:teuthology.orchestra.run.vm03.stdout:Deploying node-exporter service with default placement... 2026-03-10T12:14:06.632 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update... 
2026-03-10T12:14:06.632 INFO:teuthology.orchestra.run.vm03.stdout:Deploying alertmanager service with default placement... 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: Saving service mon spec with placement count:5 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: Saving service mgr spec with placement count:2 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: Saving service crash spec with placement * 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: Saving service ceph-exporter spec with placement * 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:06.726 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:06 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:06.919 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update... 2026-03-10T12:14:07.446 INFO:teuthology.orchestra.run.vm03.stdout:Enabling the dashboard module... 
2026-03-10T12:14:07.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:07 vm03 ceph-mon[47106]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:07.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:07 vm03 ceph-mon[47106]: Saving service prometheus spec with placement count:1 2026-03-10T12:14:07.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:07 vm03 ceph-mon[47106]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:07.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:07 vm03 ceph-mon[47106]: Saving service grafana spec with placement count:1 2026-03-10T12:14:07.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:07 vm03 ceph-mon[47106]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:07.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:07 vm03 ceph-mon[47106]: Saving service node-exporter spec with placement * 2026-03-10T12:14:07.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:07 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:07.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:07 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:07.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:07 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/31369703' entity='client.admin' 2026-03-10T12:14:07.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:07 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/838148595' entity='client.admin' 2026-03-10T12:14:08.632 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:08 vm03 ceph-mon[47106]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:08.632 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:08 vm03 ceph-mon[47106]: Saving service alertmanager spec with placement count:1 2026-03-10T12:14:08.632 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:08 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/4178718563' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-10T12:14:08.632 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:08 vm03 ceph-mon[47106]: from='mgr.14118 192.168.123.103:0/4293556369' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:08.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-10T12:14:08.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 9, 2026-03-10T12:14:08.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-10T12:14:08.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "active_name": "vm03.oxmxtj", 2026-03-10T12:14:08.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-10T12:14:08.999 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-10T12:14:08.999 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for the mgr to restart... 2026-03-10T12:14:08.999 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr epoch 9... 
2026-03-10T12:14:09.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:09 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/4178718563' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-10T12:14:09.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:09 vm03 ceph-mon[47106]: mgrmap e9: vm03.oxmxtj(active, since 7s) 2026-03-10T12:14:09.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:09 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/823826344' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: Active manager daemon vm03.oxmxtj restarted 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: Activating manager daemon vm03.oxmxtj 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: osdmap e3: 0 total, 0 up, 0 in 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: mgrmap e10: vm03.oxmxtj(active, starting, since 0.0052837s) 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr metadata", "who": "vm03.oxmxtj", "id": "vm03.oxmxtj"}]: dispatch 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: Manager daemon vm03.oxmxtj is now available 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:14:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:11 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.oxmxtj/mirror_snapshot_schedule"}]: dispatch 2026-03-10T12:14:12.679 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-10T12:14:12.679 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11, 2026-03-10T12:14:12.679 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-10T12:14:12.679 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-10T12:14:12.679 INFO:teuthology.orchestra.run.vm03.stdout:mgr epoch 9 is available 2026-03-10T12:14:12.679 
INFO:teuthology.orchestra.run.vm03.stdout:Generating a dashboard self-signed certificate... 2026-03-10T12:14:12.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:12 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.oxmxtj/trash_purge_schedule"}]: dispatch 2026-03-10T12:14:12.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:12 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:12.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:12 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:12.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:12 vm03 ceph-mon[47106]: mgrmap e11: vm03.oxmxtj(active, since 1.00775s) 2026-03-10T12:14:13.000 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Self-signed certificate created 2026-03-10T12:14:13.000 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial admin user... 2026-03-10T12:14:13.392 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$0hTcGivkHDa5FpWmfahO9eKPZEgQmfW2cLNVyKaHGCPjFZHGW1tJm", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773144853, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-10T12:14:13.392 INFO:teuthology.orchestra.run.vm03.stdout:Fetching dashboard port number... 2026-03-10T12:14:13.643 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 8443 2026-03-10T12:14:13.643 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-10T12:14:13.643 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to open ports <[8443]>. 
firewalld.service is not available 2026-03-10T12:14:13.644 INFO:teuthology.orchestra.run.vm03.stdout:Ceph Dashboard is now available at: 2026-03-10T12:14:13.644 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:13.644 INFO:teuthology.orchestra.run.vm03.stdout: URL: https://vm03.local:8443/ 2026-03-10T12:14:13.644 INFO:teuthology.orchestra.run.vm03.stdout: User: admin 2026-03-10T12:14:13.644 INFO:teuthology.orchestra.run.vm03.stdout: Password: kmtn0i4mpb 2026-03-10T12:14:13.644 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:13.644 INFO:teuthology.orchestra.run.vm03.stdout:Saving cluster configuration to /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config directory 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:12] ENGINE Bus STARTING 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:12] ENGINE Serving on https://192.168.123.103:7150 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:12] ENGINE Client ('192.168.123.103', 47884) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:12] ENGINE Serving on http://192.168.123.103:8765 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:12] ENGINE Bus STARTED 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:13.890 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:13 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/3175357933' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout:Or, if you are only running a single cluster on this host: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: sudo /home/ubuntu/cephtest/cephadm shell 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout:Please consider enabling telemetry to help improve Ceph: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: ceph telemetry on 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout:For more information see: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:13.938 INFO:teuthology.orchestra.run.vm03.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/ 2026-03-10T12:14:13.939 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:13.939 INFO:teuthology.orchestra.run.vm03.stdout:Bootstrap complete. 2026-03-10T12:14:13.966 INFO:tasks.cephadm:Fetching config... 2026-03-10T12:14:13.966 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T12:14:13.966 DEBUG:teuthology.orchestra.run.vm03:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-10T12:14:13.993 INFO:tasks.cephadm:Fetching client.admin keyring... 2026-03-10T12:14:13.993 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T12:14:13.993 DEBUG:teuthology.orchestra.run.vm03:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-10T12:14:14.061 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-10T12:14:14.061 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T12:14:14.061 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/keyring of=/dev/stdout 2026-03-10T12:14:14.133 INFO:tasks.cephadm:Fetching pub ssh key... 2026-03-10T12:14:14.133 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T12:14:14.133 DEBUG:teuthology.orchestra.run.vm03:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-10T12:14:14.189 INFO:tasks.cephadm:Installing pub ssh key for root users... 
2026-03-10T12:14:14.189 DEBUG:teuthology.orchestra.run.vm03:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINIv6I20QuSyUAgDrvGFkaGDyNPaU9Npk2opvbBR1yiB ceph-7444ff0e-1c7a-11f1-9305-473e10361f26' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-10T12:14:14.279 INFO:teuthology.orchestra.run.vm03.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINIv6I20QuSyUAgDrvGFkaGDyNPaU9Npk2opvbBR1yiB ceph-7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:14:14.298 DEBUG:teuthology.orchestra.run.vm09:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINIv6I20QuSyUAgDrvGFkaGDyNPaU9Npk2opvbBR1yiB ceph-7444ff0e-1c7a-11f1-9305-473e10361f26' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-10T12:14:14.331 INFO:teuthology.orchestra.run.vm09.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINIv6I20QuSyUAgDrvGFkaGDyNPaU9Npk2opvbBR1yiB ceph-7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:14:14.343 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-10T12:14:14.537 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:14:14.820 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:14 vm03 ceph-mon[47106]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:14.820 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:14 vm03 ceph-mon[47106]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:14.820 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:14 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2228656306' entity='client.admin' 2026-03-10T12:14:14.820 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:14 vm03 ceph-mon[47106]: mgrmap e12: vm03.oxmxtj(active, since 2s) 2026-03-10T12:14:14.864 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-10T12:14:14.864 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-10T12:14:15.067 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:14:15.385 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm09 2026-03-10T12:14:15.385 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-10T12:14:15.385 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.conf 2026-03-10T12:14:15.399 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-10T12:14:15.399 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T12:14:15.454 INFO:tasks.cephadm:Adding host vm09 to orchestrator... 
2026-03-10T12:14:15.454 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch host add vm09 2026-03-10T12:14:15.633 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:14:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:15 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2521921715' entity='client.admin' 2026-03-10T12:14:15.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:15 vm03 ceph-mon[47106]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:15.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:15 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm09", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: Updating vm03:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: Deploying cephadm binary to vm09 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: Updating vm03:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.client.admin.keyring 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 
192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm03", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm03", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:17.328 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:17 vm03 ceph-mon[47106]: Deploying daemon ceph-exporter.vm03 on vm03 2026-03-10T12:14:17.555 INFO:teuthology.orchestra.run.vm03.stdout:Added host 'vm09' with addr '192.168.123.109' 2026-03-10T12:14:17.615 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch host ls --format=json 2026-03-10T12:14:17.963 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:14:18.233 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:18.233 INFO:teuthology.orchestra.run.vm03.stdout:[{"addr": "192.168.123.103", "hostname": "vm03", "labels": [], "status": ""}, {"addr": "192.168.123.109", "hostname": "vm09", "labels": [], "status": ""}] 2026-03-10T12:14:18.291 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-10T12:14:18.291 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd crush tunables default 2026-03-10T12:14:18.489 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:14:18.496 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:18.496 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:18.496 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:18.496 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:18.496 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: 
from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T12:14:18.496 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-10T12:14:18.496 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:18.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: Deploying daemon crash.vm03 on vm03 2026-03-10T12:14:18.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:18.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: Added host vm09 2026-03-10T12:14:18.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:18.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:18.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:18.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:18 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:19.557 INFO:teuthology.orchestra.run.vm03.stderr:adjusted tunables profile to default 2026-03-10T12:14:19.608 INFO:tasks.cephadm:Adding mon.vm03 on vm03 2026-03-10T12:14:19.608 INFO:tasks.cephadm:Adding mon.vm09 on vm09 2026-03-10T12:14:19.608 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch apply mon '2;vm03:192.168.123.103=vm03;vm09:192.168.123.109=vm09' 2026-03-10T12:14:19.760 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:19.790 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:19.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:19 vm03 ceph-mon[47106]: Deploying daemon node-exporter.vm03 on vm03 2026-03-10T12:14:19.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:19 vm03 ceph-mon[47106]: from='client.14189 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:14:19.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:19 vm03 ceph-mon[47106]: mgrmap e13: vm03.oxmxtj(active, since 6s) 2026-03-10T12:14:19.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:19 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/420781495' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-10T12:14:20.010 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled mon update... 2026-03-10T12:14:20.084 DEBUG:teuthology.orchestra.run.vm09:mon.vm09> sudo journalctl -f -n 0 -u ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm09.service 2026-03-10T12:14:20.085 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-10T12:14:20.085 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:20.278 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:20.315 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:20.573 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:20.573 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:20.573 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:20.820 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:20 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/420781495' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-10T12:14:20.820 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:20 vm03 ceph-mon[47106]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T12:14:20.820 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:20 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:21.644 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:21.644 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:21.795 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:21.827 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:21.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:21 vm03 ceph-mon[47106]: from='client.14193 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm03:192.168.123.103=vm03;vm09:192.168.123.109=vm09", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:14:21.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:21 vm03 ceph-mon[47106]: Saving service mon spec with placement vm03:192.168.123.103=vm03;vm09:192.168.123.109=vm09;count:2 2026-03-10T12:14:21.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:21 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/2921988989' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:21.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:21 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:21.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:21 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:21.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:21 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:21.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:21 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:22.060 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:22.060 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:22.060 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:23.131 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:23.131 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:23.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:22 vm03 ceph-mon[47106]: Deploying daemon alertmanager.vm03 on vm03 2026-03-10T12:14:23.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:22 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:23.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:22 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/2417407496' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:23.295 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:23.330 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:23.585 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:23.585 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:23.585 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:23.991 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:23 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/849775909' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:24.629 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:24.629 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:24.778 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:24.813 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:25.050 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:25.050 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:25.050 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: Deploying daemon grafana.vm03 on vm03 2026-03-10T12:14:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:25 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/1573734763' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:26.091 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-10T12:14:26.091 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:26.250 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:26.285 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:26.525 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:26.525 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:26.525 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:26.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:26 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/1314350649' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:27.585 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:27.586 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:27.739 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:27.772 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:28.022 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:28.023 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:28.023 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:28.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:27 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:29.092 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-10T12:14:29.093 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:29.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:28 vm03 ceph-mon[47106]: from='client.? 
192.168.123.109:0/2935030745' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:29.246 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:29.283 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:29.547 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:29.547 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:29.547 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:30.046 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:29 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/3466296841' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:30.801 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-10T12:14:30.801 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:30.953 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:30.987 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:31.229 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:31.229 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:31.229 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:31.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:31 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:31.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:31 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:31.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:31 vm03 ceph-mon[47106]: 
from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:31.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:31 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:31.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:31 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:31.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:31 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:31.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:31 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:31.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:31 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:31.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:31 vm03 ceph-mon[47106]: Deploying daemon prometheus.vm03 on vm03 2026-03-10T12:14:31.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:31 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/4279466880' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:32.272 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-10T12:14:32.272 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:32.419 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:32.453 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:32.697 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:32.697 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:32.697 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:33.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:32 vm03 ceph-mon[47106]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:14:33.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:32 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:33.754 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:33.755 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:33.904 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:33.944 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:34.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:33 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/2462786072' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:34.196 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:34.196 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:34.196 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:35.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:34 vm03 ceph-mon[47106]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:14:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:34 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/4259495183' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:35.239 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:35.239 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:35.396 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:35.434 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:35.684 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:35.684 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:35.684 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:35.914 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:35 vm03 ceph-mon[47106]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:14:35.914 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:35 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/203881594' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:36.747 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:36.748 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:36.895 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:36.930 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:37.192 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:37.192 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:37.192 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:37.351 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:36 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:37.351 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:36 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:37.351 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:36 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:37.351 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:36 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T12:14:37.351 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:36 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:38.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:37 vm03 ceph-mon[47106]: from='mgr.14162 192.168.123.103:0/3143487303' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T12:14:38.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:37 vm03 ceph-mon[47106]: mgrmap e14: vm03.oxmxtj(active, since 25s) 2026-03-10T12:14:38.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:37 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/4028685737' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:38.256 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:38.256 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:38.428 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:38.464 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:38.703 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:38.703 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:38.703 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:39.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:38 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/1731376652' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:39.760 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:39.761 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:39.920 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:39.973 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:40.252 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:40.252 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:40.252 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:40.344 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: Active manager daemon vm03.oxmxtj restarted 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: Activating manager daemon vm03.oxmxtj 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: osdmap e5: 0 total, 0 up, 0 in 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: mgrmap e15: vm03.oxmxtj(active, starting, since 0.00499077s) 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr metadata", "who": "vm03.oxmxtj", "id": "vm03.oxmxtj"}]: dispatch 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: Manager daemon vm03.oxmxtj is now available 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 
2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.oxmxtj/mirror_snapshot_schedule"}]: dispatch 2026-03-10T12:14:40.345 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.oxmxtj/trash_purge_schedule"}]: dispatch 2026-03-10T12:14:41.261 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:41 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:41.261 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:41 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/4094163259' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:41.261 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:41 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:41.261 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:41 vm03 ceph-mon[47106]: mgrmap e16: vm03.oxmxtj(active, since 1.00805s) 2026-03-10T12:14:41.261 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:41 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:41.328 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:41.328 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:41.563 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:41.602 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T12:14:41.870 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:41.870 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:41.870 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:41] ENGINE Bus STARTING 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:41] ENGINE Serving on https://192.168.123.103:7150 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:41] ENGINE Client ('192.168.123.103', 43078) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:41] ENGINE Serving on http://192.168.123.103:8765 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: [10/Mar/2026:12:14:41] ENGINE Bus STARTED 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T12:14:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:42 vm03 ceph-mon[47106]: from='client.? 
192.168.123.109:0/1726582133' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:42.929 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-10T12:14:42.929 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:43.135 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: Updating vm09:/etc/ceph/ceph.conf 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: Updating vm09:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: Updating vm03:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: Updating vm09:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.client.admin.keyring 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: mgrmap e17: vm03.oxmxtj(active, since 2s) 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: Updating 
vm03:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.client.admin.keyring 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:43.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:43 vm03 ceph-mon[47106]: Deploying daemon ceph-exporter.vm09 on vm09 2026-03-10T12:14:43.661 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:43.661 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:43.661 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:44.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:44 vm03 ceph-mon[47106]: from='client.? 
192.168.123.109:0/2268741253' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:44.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:44 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:44.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:44 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:44.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:44 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:44.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:44 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:44.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:44 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T12:14:44.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:44 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-10T12:14:44.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:44 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:44.731 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-10T12:14:44.731 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:44.918 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:45.216 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:45.216 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:45.217 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:46.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:45 vm03 ceph-mon[47106]: Deploying daemon crash.vm09 on vm09 2026-03-10T12:14:46.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:45 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:46.160 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:45 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:46.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:45 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:46.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:45 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:46.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:45 vm03 ceph-mon[47106]: Deploying daemon node-exporter.vm09 on vm09 2026-03-10T12:14:46.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:45 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:46.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:45 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/3072551792' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:46.282 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-10T12:14:46.282 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:46.441 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:46.695 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:46.695 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:46.695 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:47.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:46 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/249740213' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:47.755 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-10T12:14:47.756 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:47.983 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:48.420 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:48.420 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:13:48.965537Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T12:14:48.420 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm09.pftowo", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm09.pftowo", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: Deploying daemon mgr.vm09.pftowo on vm09 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 
2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:48 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/1717395202' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:49.504 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-10T12:14:49.504 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mon dump -f json 2026-03-10T12:14:49.690 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm09/config 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: mon.vm03 calling monitor election 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: mon.vm09 calling monitor election 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.? 
192.168.123.109:0/1275995043' entity='mgr.vm09.pftowo' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm09.pftowo/crt"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: mon.vm03 is new leader, mons vm03,vm09 in quorum (ranks 0,1) 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: monmap epoch 2 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: last_changed 2026-03-10T12:14:49.390344+0000 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: created 2026-03-10T12:13:48.965537+0000 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: min_mon_release 19 (squid) 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: election_strategy: 1 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.vm03 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.vm09 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: fsmap 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: osdmap e5: 0 total, 0 up, 0 in 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: mgrmap e17: vm03.oxmxtj(active, since 14s) 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: overall HEALTH_OK 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: Standby manager daemon vm09.pftowo started 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.? 192.168.123.109:0/1275995043' entity='mgr.vm09.pftowo' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.? 192.168.123.109:0/1275995043' entity='mgr.vm09.pftowo' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm09.pftowo/key"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.? 
192.168.123.109:0/1275995043' entity='mgr.vm09.pftowo' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T12:14:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:54 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: mgrmap e18: vm03.oxmxtj(active, since 14s), standbys: vm09.pftowo 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr metadata", "who": "vm09.pftowo", "id": "vm09.pftowo"}]: dispatch 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: Updating vm03:/etc/ceph/ceph.conf 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: Updating vm09:/etc/ceph/ceph.conf 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: 
from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:55.680 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:55 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:14:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:55 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:14:55.961 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-10T12:14:55.961 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":2,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","modified":"2026-03-10T12:14:49.390344Z","created":"2026-03-10T12:13:48.965537Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm09","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:3300","nonce":0},{"type":"v1","addr":"192.168.123.109:6789","nonce":0}]},"addr":"192.168.123.109:6789/0","public_addr":"192.168.123.109:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-10T12:14:55.961 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 2 2026-03-10T12:14:56.013 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-10T12:14:56.013 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph config generate-minimal-conf 2026-03-10T12:14:56.193 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:14:56.450 INFO:teuthology.orchestra.run.vm03.stdout:# minimal ceph.conf for 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:14:56.450 INFO:teuthology.orchestra.run.vm03.stdout:[global] 2026-03-10T12:14:56.450 INFO:teuthology.orchestra.run.vm03.stdout: fsid = 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:14:56.450 INFO:teuthology.orchestra.run.vm03.stdout: mon_host = [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] 2026-03-10T12:14:56.503 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 
2026-03-10T12:14:56.503 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T12:14:56.503 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.conf 2026-03-10T12:14:56.553 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T12:14:56.553 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T12:14:56.637 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-10T12:14:56.637 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.conf 2026-03-10T12:14:56.662 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-10T12:14:56.662 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: Updating vm09:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: Updating vm03:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: Reconfiguring mon.vm03 (unknown last config time)... 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: Reconfiguring daemon mon.vm03 on vm03 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: Reconfiguring mgr.vm03.oxmxtj (unknown last config time)... 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm03.oxmxtj", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: Reconfiguring daemon mgr.vm03.oxmxtj on vm03 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: Reconfiguring ceph-exporter.vm03 (monmap changed)... 
2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm03", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: Reconfiguring daemon ceph-exporter.vm03 on vm03 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/3537903709' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.723 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:56 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3682019720' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:56.725 INFO:tasks.cephadm:Deploying OSDs... 2026-03-10T12:14:56.725 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T12:14:56.725 DEBUG:teuthology.orchestra.run.vm03:> dd if=/scratch_devs of=/dev/stdout 2026-03-10T12:14:56.747 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T12:14:56.748 DEBUG:teuthology.orchestra.run.vm03:> ls /dev/[sv]d? 
2026-03-10T12:14:56.805 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vda 2026-03-10T12:14:56.805 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdb 2026-03-10T12:14:56.805 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdc 2026-03-10T12:14:56.805 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdd 2026-03-10T12:14:56.805 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vde 2026-03-10T12:14:56.805 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-10T12:14:56.805 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-10T12:14:56.805 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdb 2026-03-10T12:14:56.866 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdb 2026-03-10T12:14:56.866 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T12:14:56.866 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 223 Links: 1 Device type: fc,10 2026-03-10T12:14:56.866 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T12:14:56.866 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T12:14:56.866 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 12:14:15.889425975 +0000 2026-03-10T12:14:56.866 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 12:12:46.757075625 +0000 2026-03-10T12:14:56.866 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 12:12:46.757075625 +0000 2026-03-10T12:14:56.866 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 12:09:35.260000000 +0000 2026-03-10T12:14:56.866 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: Updating vm09:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: Updating vm03:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/config/ceph.conf 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: Reconfiguring mon.vm03 (unknown last config time)... 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: Reconfiguring daemon mon.vm03 on vm03 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: Reconfiguring mgr.vm03.oxmxtj (unknown last config time)... 
2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm03.oxmxtj", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: Reconfiguring daemon mgr.vm03.oxmxtj on vm03 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: Reconfiguring ceph-exporter.vm03 (monmap changed)... 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm03", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T12:14:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:56.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: Reconfiguring daemon ceph-exporter.vm03 on vm03 2026-03-10T12:14:56.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='client.? 
192.168.123.109:0/3537903709' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T12:14:56.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T12:14:56.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:56.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:56.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:56 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3682019720' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:56.934 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-10T12:14:56.934 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-10T12:14:56.934 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 8.3646e-05 s, 6.1 MB/s 2026-03-10T12:14:56.934 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-10T12:14:56.996 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdc 2026-03-10T12:14:57.093 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdc 2026-03-10T12:14:57.093 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T12:14:57.093 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-10T12:14:57.094 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T12:14:57.094 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T12:14:57.094 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 12:14:15.925426063 +0000 2026-03-10T12:14:57.094 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 12:12:46.754075622 +0000 2026-03-10T12:14:57.094 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 12:12:46.754075622 +0000 2026-03-10T12:14:57.094 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 12:09:35.268000000 +0000 2026-03-10T12:14:57.094 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-10T12:14:57.171 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-10T12:14:57.171 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-10T12:14:57.171 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000449924 s, 1.1 MB/s 2026-03-10T12:14:57.172 DEBUG:teuthology.orchestra.run.vm03:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-10T12:14:57.191 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdd 2026-03-10T12:14:57.257 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdd 2026-03-10T12:14:57.257 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T12:14:57.257 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-10T12:14:57.257 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T12:14:57.257 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T12:14:57.257 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 12:14:15.953426131 +0000 2026-03-10T12:14:57.257 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 12:12:46.787075658 +0000 2026-03-10T12:14:57.257 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 12:12:46.787075658 +0000 2026-03-10T12:14:57.257 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 12:09:35.271000000 +0000 2026-03-10T12:14:57.257 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-10T12:14:57.334 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-10T12:14:57.334 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-10T12:14:57.334 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000247655 s, 2.1 MB/s 2026-03-10T12:14:57.335 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-10T12:14:57.403 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vde 2026-03-10T12:14:57.464 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vde 2026-03-10T12:14:57.464 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T12:14:57.464 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-10T12:14:57.464 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T12:14:57.464 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T12:14:57.464 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-10 12:14:15.979426195 +0000 2026-03-10T12:14:57.464 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-10 12:12:46.760075628 +0000 2026-03-10T12:14:57.464 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-10 12:12:46.760075628 +0000 2026-03-10T12:14:57.464 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-10 12:09:35.301000000 +0000 2026-03-10T12:14:57.464 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-10T12:14:57.532 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-10T12:14:57.532 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-10T12:14:57.532 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000138729 s, 3.7 MB/s 2026-03-10T12:14:57.536 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-10T12:14:57.593 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-10T12:14:57.593 DEBUG:teuthology.orchestra.run.vm09:> dd if=/scratch_devs of=/dev/stdout 2026-03-10T12:14:57.608 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T12:14:57.608 DEBUG:teuthology.orchestra.run.vm09:> ls /dev/[sv]d? 
2026-03-10T12:14:57.663 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vda 2026-03-10T12:14:57.663 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdb 2026-03-10T12:14:57.663 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdc 2026-03-10T12:14:57.663 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdd 2026-03-10T12:14:57.663 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vde 2026-03-10T12:14:57.663 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-10T12:14:57.663 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-10T12:14:57.663 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdb 2026-03-10T12:14:57.721 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdb 2026-03-10T12:14:57.721 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T12:14:57.721 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 221 Links: 1 Device type: fc,10 2026-03-10T12:14:57.721 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T12:14:57.721 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T12:14:57.721 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-10 12:14:42.240605890 +0000 2026-03-10T12:14:57.721 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-10 12:12:48.073630293 +0000 2026-03-10T12:14:57.721 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-10 12:12:48.073630293 +0000 2026-03-10T12:14:57.721 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-10 12:10:06.238000000 +0000 2026-03-10T12:14:57.722 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-10T12:14:57.783 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-10T12:14:57.783 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-10T12:14:57.783 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000162946 s, 3.1 MB/s 2026-03-10T12:14:57.783 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-10T12:14:57.840 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdc 2026-03-10T12:14:57.896 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdc 2026-03-10T12:14:57.897 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T12:14:57.897 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 222 Links: 1 Device type: fc,20 2026-03-10T12:14:57.897 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T12:14:57.897 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T12:14:57.897 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-10 12:14:42.263605919 +0000 2026-03-10T12:14:57.897 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-10 12:12:48.051630265 +0000 2026-03-10T12:14:57.897 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-10 12:12:48.051630265 +0000 2026-03-10T12:14:57.897 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-10 12:10:06.241000000 +0000 2026-03-10T12:14:57.897 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-10T12:14:57.959 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-10T12:14:57.959 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-10T12:14:57.959 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000175048 s, 2.9 MB/s 2026-03-10T12:14:57.960 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-10T12:14:58.016 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdd 2026-03-10T12:14:58.056 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:57 vm03 ceph-mon[47106]: Reconfiguring crash.vm03 (monmap changed)... 2026-03-10T12:14:58.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:57 vm03 ceph-mon[47106]: Reconfiguring daemon crash.vm03 on vm03 2026-03-10T12:14:58.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:57 vm03 ceph-mon[47106]: Reconfiguring alertmanager.vm03 (dependencies changed)... 2026-03-10T12:14:58.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:57 vm03 ceph-mon[47106]: Reconfiguring daemon alertmanager.vm03 on vm03 2026-03-10T12:14:58.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:57 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:58.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:57 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:58.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:57 vm03 ceph-mon[47106]: Reconfiguring grafana.vm03 (dependencies changed)... 
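The scratch-device checks running on vm03 and vm09 here follow one fixed pattern per device: stat the node, read a single sector with dd, and confirm the device is not mounted anywhere. A minimal shell sketch of that sequence (device names taken from the log; the loop itself is illustrative, not teuthology's actual code):

# illustrative only: the per-device validation pattern visible in the log
for dev in /dev/vdb /dev/vdc /dev/vdd /dev/vde; do
    stat "$dev"                                  # block special file exists
    sudo dd if="$dev" of=/dev/null count=1       # first 512-byte sector is readable
    ! mount | grep -v devtmpfs | grep -q "$dev"  # device is not mounted anywhere
done
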
2026-03-10T12:14:58.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:57 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:58.057 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:57 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:58.073 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdd 2026-03-10T12:14:58.073 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T12:14:58.073 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 223 Links: 1 Device type: fc,30 2026-03-10T12:14:58.073 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T12:14:58.073 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T12:14:58.073 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-10 12:14:42.286605948 +0000 2026-03-10T12:14:58.073 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-10 12:12:48.090630314 +0000 2026-03-10T12:14:58.073 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-10 12:12:48.090630314 +0000 2026-03-10T12:14:58.073 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-10 12:10:06.243000000 +0000 2026-03-10T12:14:58.073 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-10T12:14:58.134 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:57 vm09 ceph-mon[55914]: Reconfiguring crash.vm03 (monmap changed)... 2026-03-10T12:14:58.134 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:57 vm09 ceph-mon[55914]: Reconfiguring daemon crash.vm03 on vm03 2026-03-10T12:14:58.134 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:57 vm09 ceph-mon[55914]: Reconfiguring alertmanager.vm03 (dependencies changed)... 2026-03-10T12:14:58.134 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:57 vm09 ceph-mon[55914]: Reconfiguring daemon alertmanager.vm03 on vm03 2026-03-10T12:14:58.134 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:57 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:58.134 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:57 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:58.134 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:57 vm09 ceph-mon[55914]: Reconfiguring grafana.vm03 (dependencies changed)... 2026-03-10T12:14:58.134 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:57 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:58.134 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:57 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:58.135 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-10T12:14:58.135 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-10T12:14:58.135 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000153087 s, 3.3 MB/s 2026-03-10T12:14:58.137 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-10T12:14:58.193 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vde 2026-03-10T12:14:58.250 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vde 2026-03-10T12:14:58.250 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T12:14:58.250 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 224 Links: 1 Device type: fc,40 2026-03-10T12:14:58.250 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T12:14:58.250 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T12:14:58.250 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-10 12:14:42.308605976 +0000 2026-03-10T12:14:58.250 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-10 12:12:48.058630274 +0000 2026-03-10T12:14:58.250 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-10 12:12:48.058630274 +0000 2026-03-10T12:14:58.250 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-10 12:10:06.244000000 +0000 2026-03-10T12:14:58.250 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-10T12:14:58.312 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-10T12:14:58.312 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-10T12:14:58.312 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000157174 s, 3.3 MB/s 2026-03-10T12:14:58.313 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-10T12:14:58.369 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch apply osd --all-available-devices 2026-03-10T12:14:58.585 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm09/config 2026-03-10T12:14:58.860 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled osd.all-available-devices update... 2026-03-10T12:14:58.910 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 2026-03-10T12:14:58.910 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:14:58.969 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: Reconfiguring daemon grafana.vm03 on vm03 2026-03-10T12:14:58.969 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: Reconfiguring prometheus.vm03 (dependencies changed)... 2026-03-10T12:14:59.086 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:14:59.201 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: Reconfiguring daemon grafana.vm03 on vm03 2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: Reconfiguring prometheus.vm03 (dependencies changed)... 
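At this point cephadm has scheduled the osd.all-available-devices service and the task waits for 8 OSDs (4 scratch devices on each of the two hosts) by repeatedly running ceph osd stat -f json through the cephadm shell, as the polls below show. A rough sketch of such a wait loop, reusing the image and fsid from the log (jq is an assumption made for brevity; the task itself parses the JSON in Python):

# illustrative only: poll until the expected number of OSDs is up
want=8
cephadm_shell() {
    sudo /home/ubuntu/cephtest/cephadm \
        --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- "$@"
}
until [ "$(cephadm_shell ceph osd stat -f json | jq .num_up_osds)" -ge "$want" ]; do
    sleep 1
done
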
2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: Reconfiguring daemon prometheus.vm03 on vm03 2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:59.202 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:58 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:59.240 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: Reconfiguring daemon prometheus.vm03 on vm03 2026-03-10T12:14:59.240 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:59.240 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:59.240 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T12:14:59.240 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:59.240 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:59.240 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: from='mgr.14217 
192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:59.240 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T12:14:59.240 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:14:59.240 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:58 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:14:59.302 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:14:59.372 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: Reconfiguring ceph-exporter.vm09 (monmap changed)... 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: Reconfiguring daemon ceph-exporter.vm09 on vm09 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: Reconfiguring crash.vm09 (monmap changed)... 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: Reconfiguring daemon crash.vm09 on vm09 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='client.14254 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: Marking host: vm03 for OSDSpec preview refresh. 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: Marking host: vm09 for OSDSpec preview refresh. 
2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: Saving service osd.all-available-devices spec with placement * 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm09.pftowo", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3750563016' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T12:15:00.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm03.local:9093"}]: dispatch 2026-03-10T12:15:00.142 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 
ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.142 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T12:15:00.142 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm03.local:3000"}]: dispatch 2026-03-10T12:15:00.142 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.142 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T12:15:00.142 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-10T12:15:00.142 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.142 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:14:59 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:15:00.151 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: Reconfiguring ceph-exporter.vm09 (monmap changed)... 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: Reconfiguring daemon ceph-exporter.vm09 on vm09 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: Reconfiguring crash.vm09 (monmap changed)... 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: Reconfiguring daemon crash.vm09 on vm09 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='client.14254 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: Marking host: vm03 for OSDSpec preview refresh. 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: Marking host: vm09 for OSDSpec preview refresh. 
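The "Saving service osd.all-available-devices spec with placement *" messages record the service spec that ceph orch apply osd --all-available-devices creates. For orientation, roughly the same service can be applied from a spec file; the YAML below is an approximation of that implied spec (field layout per the cephadm OSD service spec), not a dump taken from this cluster:

# illustrative only: approximate spec implied by "ceph orch apply osd --all-available-devices"
cat > osd-spec.yaml <<'EOF'
service_type: osd
service_id: all-available-devices
placement:
  host_pattern: '*'
spec:
  data_devices:
    all: true
EOF
ceph orch apply -i osd-spec.yaml
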
2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: Saving service osd.all-available-devices spec with placement * 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm09.pftowo", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3750563016' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm03.local:9093"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 
ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm03.local:3000"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:00.152 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:14:59 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:15:00.373 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:00.596 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:00.878 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:00.942 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: Reconfiguring mgr.vm09.pftowo (monmap changed)... 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: Reconfiguring daemon mgr.vm09.pftowo on vm09 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: Reconfiguring mon.vm09 (monmap changed)... 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: Reconfiguring daemon mon.vm09 on vm09 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm03.local:9093"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm03.local:3000"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 
192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:01.392 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:00 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3735861592' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: Reconfiguring mgr.vm09.pftowo (monmap changed)... 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: Reconfiguring daemon mgr.vm09.pftowo on vm09 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: Reconfiguring mon.vm09 (monmap changed)... 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: Reconfiguring daemon mon.vm09 on vm09 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm03.local:9093"}]: dispatch 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm03.local:3000"}]: dispatch 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T12:15:01.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:01.411 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:00 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3735861592' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:01.942 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:02.167 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:02.207 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:01 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/3079928928' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2576abe9-75cc-4a51-9524-332603ebd58e"}]: dispatch 2026-03-10T12:15:02.207 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:01 vm03 ceph-mon[47106]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2576abe9-75cc-4a51-9524-332603ebd58e"}]: dispatch 2026-03-10T12:15:02.207 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:01 vm03 ceph-mon[47106]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2576abe9-75cc-4a51-9524-332603ebd58e"}]': finished 2026-03-10T12:15:02.207 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:01 vm03 ceph-mon[47106]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T12:15:02.207 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:01 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:02.207 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:01 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3186825143' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "1d2fc1c1-1ac3-44fd-9261-4f96143ff0fe"}]: dispatch 2026-03-10T12:15:02.207 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:01 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3186825143' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1d2fc1c1-1ac3-44fd-9261-4f96143ff0fe"}]': finished 2026-03-10T12:15:02.207 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:01 vm03 ceph-mon[47106]: osdmap e7: 2 total, 0 up, 2 in 2026-03-10T12:15:02.207 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:01 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:02.207 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:01 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:02.254 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:01 vm09 ceph-mon[55914]: from='client.? 192.168.123.109:0/3079928928' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2576abe9-75cc-4a51-9524-332603ebd58e"}]: dispatch 2026-03-10T12:15:02.254 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:01 vm09 ceph-mon[55914]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2576abe9-75cc-4a51-9524-332603ebd58e"}]: dispatch 2026-03-10T12:15:02.254 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:01 vm09 ceph-mon[55914]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2576abe9-75cc-4a51-9524-332603ebd58e"}]': finished 2026-03-10T12:15:02.254 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:01 vm09 ceph-mon[55914]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T12:15:02.254 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:01 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:02.254 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:01 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3186825143' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "1d2fc1c1-1ac3-44fd-9261-4f96143ff0fe"}]: dispatch 2026-03-10T12:15:02.254 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:01 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3186825143' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1d2fc1c1-1ac3-44fd-9261-4f96143ff0fe"}]': finished 2026-03-10T12:15:02.254 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:01 vm09 ceph-mon[55914]: osdmap e7: 2 total, 0 up, 2 in 2026-03-10T12:15:02.254 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:01 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:02.254 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:01 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:02.391 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:02.442 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1773144901,"num_remapped_pgs":0} 2026-03-10T12:15:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:02 vm09 ceph-mon[55914]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:02 vm09 ceph-mon[55914]: from='client.? 192.168.123.109:0/260558065' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:02 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3693716937' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:02 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3688504210' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:03.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:02 vm03 ceph-mon[47106]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:03.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:02 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/260558065' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:03.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:02 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3693716937' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:03.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:02 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/3688504210' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:03.443 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:03.604 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:03.812 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:03.868 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1773144901,"num_remapped_pgs":0} 2026-03-10T12:15:04.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:03 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3565056766' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:04.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:03 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3565056766' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:04.869 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:05.033 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:05.148 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:04 vm03 ceph-mon[47106]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:05.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:04 vm09 ceph-mon[55914]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:05.445 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:05.679 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1773144901,"num_remapped_pgs":0} 2026-03-10T12:15:05.994 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='client.? 192.168.123.109:0/3434592576' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "374e23ad-59d8-444f-b159-b2c433156803"}]: dispatch 2026-03-10T12:15:05.995 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "374e23ad-59d8-444f-b159-b2c433156803"}]: dispatch 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3212155079' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/4175021392' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "05172499-528c-465d-8c43-f72fbfac1e15"}]: dispatch 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "374e23ad-59d8-444f-b159-b2c433156803"}]': finished 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: osdmap e8: 3 total, 0 up, 3 in 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/4175021392' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "05172499-528c-465d-8c43-f72fbfac1e15"}]': finished 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: osdmap e9: 4 total, 0 up, 4 in 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:05 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/3434592576' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "374e23ad-59d8-444f-b159-b2c433156803"}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "374e23ad-59d8-444f-b159-b2c433156803"}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3212155079' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/4175021392' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "05172499-528c-465d-8c43-f72fbfac1e15"}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "374e23ad-59d8-444f-b159-b2c433156803"}]': finished 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: osdmap e8: 3 total, 0 up, 3 in 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/4175021392' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "05172499-528c-465d-8c43-f72fbfac1e15"}]': finished 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: osdmap e9: 4 total, 0 up, 4 in 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:05 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:06.680 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:06.847 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:07.077 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:07.119 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1773144905,"num_remapped_pgs":0} 2026-03-10T12:15:07.385 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:06 vm03 ceph-mon[47106]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:07.385 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:06 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/3454341187' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:07.385 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:06 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/2065105255' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:07.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:06 vm09 ceph-mon[55914]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:07.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:06 vm09 ceph-mon[55914]: from='client.? 192.168.123.109:0/3454341187' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:07.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:06 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/2065105255' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:08.120 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:08.278 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:08.312 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:08 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/4000558350' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:08.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:08 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/4000558350' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:08.505 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:08.559 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1773144905,"num_remapped_pgs":0} 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3088314603' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: from='client.? 192.168.123.109:0/3459227436' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8e7d2605-017a-4d07-8a22-9ba85d0546c0"}]: dispatch 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8e7d2605-017a-4d07-8a22-9ba85d0546c0"}]: dispatch 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8e7d2605-017a-4d07-8a22-9ba85d0546c0"}]': finished 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: osdmap e10: 5 total, 0 up, 5 in 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:09.090 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:09 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3088314603' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/3459227436' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8e7d2605-017a-4d07-8a22-9ba85d0546c0"}]: dispatch 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8e7d2605-017a-4d07-8a22-9ba85d0546c0"}]: dispatch 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8e7d2605-017a-4d07-8a22-9ba85d0546c0"}]': finished 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: osdmap e10: 5 total, 0 up, 5 in 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:09 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:09.560 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:09.713 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:09.943 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:09.999 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1773144909,"num_remapped_pgs":0} 2026-03-10T12:15:10.012 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2391469839' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d9ddcc7d-9617-4105-ad17-b95f73239b61"}]: dispatch 2026-03-10T12:15:10.012 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2391469839' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d9ddcc7d-9617-4105-ad17-b95f73239b61"}]': finished 2026-03-10T12:15:10.012 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: osdmap e11: 6 total, 0 up, 6 in 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/2391469839' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d9ddcc7d-9617-4105-ad17-b95f73239b61"}]: dispatch 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/2391469839' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d9ddcc7d-9617-4105-ad17-b95f73239b61"}]': finished 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: osdmap e11: 6 total, 0 up, 6 in 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='client.? 192.168.123.109:0/3839989297' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3584104590' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:15:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:10 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/2132833693' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/3839989297' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3584104590' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:15:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:10 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2132833693' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:11.000 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:11.174 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:11.289 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:11 vm03 ceph-mon[47106]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:11.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:11 vm09 ceph-mon[55914]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:11.402 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:11.469 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1773144909,"num_remapped_pgs":0} 2026-03-10T12:15:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:12 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/968584753' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:12 vm09 ceph-mon[55914]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:12.396 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:12 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/968584753' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:12.396 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:12 vm03 ceph-mon[47106]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:12.470 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:12.675 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:12.920 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:12.968 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='client.? 192.168.123.109:0/818310349' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5c158131-4975-403e-b6f9-a3a54ac07128"}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5c158131-4975-403e-b6f9-a3a54ac07128"}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5c158131-4975-403e-b6f9-a3a54ac07128"}]': finished 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: osdmap e12: 7 total, 0 up, 7 in 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/85471813' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ad731394-00f1-4d01-80a4-002c1d9bb9a2"}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ad731394-00f1-4d01-80a4-002c1d9bb9a2"}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ad731394-00f1-4d01-80a4-002c1d9bb9a2"}]': finished 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: osdmap e13: 8 total, 0 up, 8 in 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:13.142 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='client.? 192.168.123.109:0/3823197021' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:13.142 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:13 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/1864962761' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/818310349' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5c158131-4975-403e-b6f9-a3a54ac07128"}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5c158131-4975-403e-b6f9-a3a54ac07128"}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5c158131-4975-403e-b6f9-a3a54ac07128"}]': finished 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: osdmap e12: 7 total, 0 up, 7 in 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/85471813' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ad731394-00f1-4d01-80a4-002c1d9bb9a2"}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ad731394-00f1-4d01-80a4-002c1d9bb9a2"}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ad731394-00f1-4d01-80a4-002c1d9bb9a2"}]': finished 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: osdmap e13: 8 total, 0 up, 8 in 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:13.205 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:13.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:13.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:13.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:13.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:13.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:13.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/3823197021' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:13.206 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:13 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/1864962761' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:13.969 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:14.136 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:14.250 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:14 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2333845903' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:14.250 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:14 vm03 ceph-mon[47106]: pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:14.375 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:14.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:14 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/2333845903' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T12:15:14.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:14 vm09 ceph-mon[55914]: pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:14.437 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:15.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:15 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/2164117570' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:15.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:15 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2164117570' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:15.438 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:15.609 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:15.821 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:15.871 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:16.099 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:16 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/1033230280' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:16.099 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:16 vm09 ceph-mon[55914]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:16.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:16 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/1033230280' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:16.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:16 vm03 ceph-mon[47106]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:16.872 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:17.048 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:17 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T12:15:17.048 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:17 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:17.048 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:17 vm03 ceph-mon[47106]: Deploying daemon osd.0 on vm09 2026-03-10T12:15:17.048 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:17 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T12:15:17.048 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:17 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:17.097 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:17.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:17 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T12:15:17.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:17 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:17.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:17 vm09 ceph-mon[55914]: Deploying daemon osd.0 on vm09 2026-03-10T12:15:17.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:17 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T12:15:17.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:17 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:17.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:17 vm09 ceph-mon[55914]: Deploying daemon osd.1 on vm03 2026-03-10T12:15:17.319 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:17.321 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:17 vm03 ceph-mon[47106]: Deploying daemon osd.1 on vm03 2026-03-10T12:15:17.475 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:18.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:18 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/275698769' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:18.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:18 vm03 ceph-mon[47106]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:18.247 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:18 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/275698769' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:18.247 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:18 vm09 ceph-mon[55914]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:18.476 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:18.708 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:18.996 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:19.064 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: Deploying daemon osd.2 on vm09 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: Deploying daemon osd.3 on vm03 2026-03-10T12:15:19.509 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:19 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/2690734424' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: Deploying daemon osd.2 on vm09 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: Deploying daemon osd.3 on vm03 2026-03-10T12:15:19.614 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:19 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/2690734424' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:20.064 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:20.275 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:20.661 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:20 vm03 ceph-mon[47106]: pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:20.661 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:20 vm03 ceph-mon[47106]: from='osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T12:15:20.668 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:20.733 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:20 vm09 ceph-mon[55914]: pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:20.733 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:20 vm09 ceph-mon[55914]: from='osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T12:15:20.765 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":14,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: osdmap e14: 8 total, 0 up, 8 in 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 
cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='osd.0 [v2:192.168.123.109:6800/3454672783,v1:192.168.123.109:6801/3454672783]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3850399370' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:21.522 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T12:15:21.523 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:21.523 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: Deploying daemon osd.4 on vm09 2026-03-10T12:15:21.523 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:21.523 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:21.523 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T12:15:21.523 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:21 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 
2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: osdmap e14: 8 total, 0 up, 8 in 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='osd.0 [v2:192.168.123.109:6800/3454672783,v1:192.168.123.109:6801/3454672783]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/3850399370' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T12:15:21.568 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:21.569 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: Deploying daemon osd.4 on vm09 2026-03-10T12:15:21.569 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:21.569 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:21.569 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T12:15:21.569 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:21 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:21.766 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:22.049 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:22.329 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: Deploying daemon osd.5 on vm03 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='osd.0 [v2:192.168.123.109:6800/3454672783,v1:192.168.123.109:6801/3454672783]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: osdmap e15: 8 total, 0 up, 8 in 
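[editor's note] The repeated `ceph osd stat -f json` invocations above are the harness polling the cluster while cephadm deploys the osd daemons (num_up_osds is still 0 even though num_osds keeps growing). A minimal stand-alone sketch of such a wait loop is shown below; it is not the teuthology implementation, and it assumes the admin config/keyring are already in place (so the --image/--fsid/-c/-k flags seen in the log can be omitted) and that jq is installed on the host.

    # Hypothetical polling loop (illustration only, not the harness code):
    # keep querying the cluster until every OSD reports up.
    until sudo cephadm shell -- ceph osd stat -f json \
          | jq -e '.num_osds > 0 and .num_up_osds == .num_osds' >/dev/null; do
        echo "waiting for all OSDs to come up..."
        sleep 5
    done
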
2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/3259501799' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: osdmap e16: 8 total, 0 up, 8 in 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:22.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:22 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:22.630 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":15,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:22.742 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: Deploying daemon osd.5 on vm03 2026-03-10T12:15:22.742 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T12:15:22.742 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T12:15:22.742 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='osd.0 
[v2:192.168.123.109:6800/3454672783,v1:192.168.123.109:6801/3454672783]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:22.742 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: osdmap e15: 8 total, 0 up, 8 in 2026-03-10T12:15:22.742 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:22.742 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:22.742 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:22.742 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/3259501799' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: osdmap e16: 8 total, 0 up, 8 in 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:22.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:22 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:23.631 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:23.647 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: purged_snaps scrub starts 2026-03-10T12:15:23.647 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: purged_snaps scrub ok 2026-03-10T12:15:23.647 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:23.647 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: from='osd.2 [v2:192.168.123.109:6808/4239852056,v1:192.168.123.109:6809/4239852056]' entity='osd.2' cmd=[{"prefix": 
"osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T12:15:23.647 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: from='osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633]' entity='osd.1' 2026-03-10T12:15:23.647 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:23.647 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:23.647 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T12:15:23.648 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:23.648 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: Deploying daemon osd.6 on vm09 2026-03-10T12:15:23.648 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: from='osd.3 [v2:192.168.123.103:6810/1382074390,v1:192.168.123.103:6811/1382074390]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T12:15:23.648 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:23.648 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:23 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: purged_snaps scrub starts 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: purged_snaps scrub ok 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: from='osd.2 [v2:192.168.123.109:6808/4239852056,v1:192.168.123.109:6809/4239852056]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: from='osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633]' entity='osd.1' 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T12:15:23.782 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: Deploying daemon osd.6 on vm09 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: from='osd.3 [v2:192.168.123.103:6810/1382074390,v1:192.168.123.103:6811/1382074390]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:23.782 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:23 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:23.802 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:24.046 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:24.110 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":17,"num_osds":8,"num_up_osds":2,"osd_up_since":1773144923,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: purged_snaps scrub starts 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: purged_snaps scrub ok 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='osd.2 [v2:192.168.123.109:6808/4239852056,v1:192.168.123.109:6809/4239852056]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='osd.3 [v2:192.168.123.103:6810/1382074390,v1:192.168.123.103:6811/1382074390]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: osd.0 [v2:192.168.123.109:6800/3454672783,v1:192.168.123.109:6801/3454672783] boot 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633] boot 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: osdmap e17: 8 total, 2 up, 8 in 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='osd.2 [v2:192.168.123.109:6808/4239852056,v1:192.168.123.109:6809/4239852056]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='osd.3 [v2:192.168.123.103:6810/1382074390,v1:192.168.123.103:6811/1382074390]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T12:15:24.733 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/3391435489' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='osd.4 [v2:192.168.123.109:6816/121770778,v1:192.168.123.109:6817/121770778]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='osd.2 [v2:192.168.123.109:6808/4239852056,v1:192.168.123.109:6809/4239852056]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='osd.3 [v2:192.168.123.103:6810/1382074390,v1:192.168.123.103:6811/1382074390]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: osdmap e18: 8 total, 2 up, 8 in 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='osd.4 [v2:192.168.123.109:6816/121770778,v1:192.168.123.109:6817/121770778]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:24.733 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:24 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: purged_snaps scrub starts 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: purged_snaps scrub ok 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='osd.2 [v2:192.168.123.109:6808/4239852056,v1:192.168.123.109:6809/4239852056]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 
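(Aside, not part of the captured log: the mon entries above show the pattern each newly created OSD follows as cephadm brings it up — it first dispatches "osd crush set-device-class" for its own id, then "osd crush create-or-move" with its weight and host/root location, and the monitor records the daemon's boot and bumps the osdmap epoch. As a minimal illustrative sketch only, the same placement could be replayed by hand with the equivalent ceph CLI calls; the OSD ids, device class, weight 0.0195, and host names below are simply copied from the log, and a working `ceph` client/keyring on the node is assumed.)

    #!/usr/bin/env python3
    # Illustrative sketch: replay the CRUSH placement steps the OSDs perform
    # above (set-device-class, then create-or-move under host/root with the
    # OSD's weight). Not the cephadm/teuthology code itself.
    import subprocess

    OSDS = [
        # (osd id, device class, crush weight, host) -- values taken from the log
        (2, "hdd", 0.0195, "vm09"),
        (3, "hdd", 0.0195, "vm03"),
    ]

    def ceph(*args: str) -> str:
        """Run a ceph CLI command and return its stdout."""
        return subprocess.run(("ceph",) + args, check=True,
                              capture_output=True, text=True).stdout

    for osd_id, dev_class, weight, host in OSDS:
        ceph("osd", "crush", "set-device-class", dev_class, f"osd.{osd_id}")
        ceph("osd", "crush", "create-or-move", f"osd.{osd_id}", str(weight),
             f"host={host}", "root=default")
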
2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='osd.3 [v2:192.168.123.103:6810/1382074390,v1:192.168.123.103:6811/1382074390]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: osd.0 [v2:192.168.123.109:6800/3454672783,v1:192.168.123.109:6801/3454672783] boot 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: osd.1 [v2:192.168.123.103:6802/939936633,v1:192.168.123.103:6803/939936633] boot 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: osdmap e17: 8 total, 2 up, 8 in 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='osd.2 [v2:192.168.123.109:6808/4239852056,v1:192.168.123.109:6809/4239852056]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='osd.3 [v2:192.168.123.103:6810/1382074390,v1:192.168.123.103:6811/1382074390]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:24.740 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/3391435489' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='osd.4 [v2:192.168.123.109:6816/121770778,v1:192.168.123.109:6817/121770778]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='osd.2 [v2:192.168.123.109:6808/4239852056,v1:192.168.123.109:6809/4239852056]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='osd.3 [v2:192.168.123.103:6810/1382074390,v1:192.168.123.103:6811/1382074390]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: osdmap e18: 8 total, 2 up, 8 in 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='osd.4 [v2:192.168.123.109:6816/121770778,v1:192.168.123.109:6817/121770778]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:24.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:24 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:25.110 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:25.453 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 
ceph-mon[47106]: Deploying daemon osd.7 on vm03 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: osd.2 [v2:192.168.123.109:6808/4239852056,v1:192.168.123.109:6809/4239852056] boot 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: osd.3 [v2:192.168.123.103:6810/1382074390,v1:192.168.123.103:6811/1382074390] boot 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: osdmap e19: 8 total, 4 up, 8 in 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 
vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:25 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: Deploying daemon osd.7 on vm03 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: osd.2 
[v2:192.168.123.109:6808/4239852056,v1:192.168.123.109:6809/4239852056] boot 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: osd.3 [v2:192.168.123.103:6810/1382074390,v1:192.168.123.103:6811/1382074390] boot 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: osdmap e19: 8 total, 4 up, 8 in 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:25.824 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:25 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:25.829 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:25.945 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":19,"num_osds":8,"num_up_osds":4,"osd_up_since":1773144925,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: purged_snaps scrub starts 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: purged_snaps scrub ok 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: purged_snaps scrub starts 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: purged_snaps scrub ok 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/2372239506' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: pgmap v30: 0 pgs: ; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: from='osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:26.756 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:26 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: purged_snaps scrub starts 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: purged_snaps scrub ok 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: purged_snaps scrub starts 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: purged_snaps scrub ok 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/2372239506' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: pgmap v30: 0 pgs: ; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: from='osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:26 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:26.946 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:27.265 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:27.567 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:27.733 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":20,"num_osds":8,"num_up_osds":4,"osd_up_since":1773144925,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: purged_snaps scrub starts 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: purged_snaps scrub ok 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: osdmap e20: 8 total, 4 up, 8 in 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 
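(Aside, not part of the captured log: between the mon entries, the harness keeps re-running `ceph osd stat -f json` through `cephadm shell` and comparing `num_up_osds` with `num_osds` to decide when the roleless deployment is ready. A minimal polling sketch of that readiness check is shown below; the cephadm path, container image, and fsid are the ones that appear in the log, while the ten-minute deadline and two-second poll interval are illustrative choices, not values from the test.)

    #!/usr/bin/env python3
    # Minimal sketch of the "wait until all OSDs are up" check seen above:
    # run `ceph osd stat -f json` via `cephadm shell` and loop on the result.
    import json
    import subprocess
    import time

    CEPHADM = [
        "sudo", "/home/ubuntu/cephtest/cephadm",
        "--image", "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
        "shell", "-c", "/etc/ceph/ceph.conf",
        "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--fsid", "7444ff0e-1c7a-11f1-9305-473e10361f26",
        "--", "ceph", "osd", "stat", "-f", "json",
    ]

    def osd_stat() -> dict:
        """Return the parsed output of `ceph osd stat -f json`."""
        out = subprocess.run(CEPHADM, check=True,
                             capture_output=True, text=True).stdout
        return json.loads(out)

    deadline = time.time() + 600  # illustrative: give the OSDs ten minutes
    while True:
        stat = osd_stat()
        print(f"epoch {stat['epoch']}: {stat['num_up_osds']}/{stat['num_osds']} up")
        if stat["num_up_osds"] == stat["num_osds"]:
            break
        if time.time() > deadline:
            raise TimeoutError("OSDs did not all come up in time")
        time.sleep(2)
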
2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='osd.6 [v2:192.168.123.109:6824/120308281,v1:192.168.123.109:6825/120308281]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='osd.4 ' entity='osd.4' 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/3651902606' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:27 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: purged_snaps scrub starts 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: purged_snaps scrub ok 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: osdmap e20: 8 total, 4 up, 8 in 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='osd.6 [v2:192.168.123.109:6824/120308281,v1:192.168.123.109:6825/120308281]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='mgr.14217 
192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='osd.4 ' entity='osd.4' 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3651902606' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:27 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:28.733 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='osd.6 [v2:192.168.123.109:6824/120308281,v1:192.168.123.109:6825/120308281]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: osd.4 [v2:192.168.123.109:6816/121770778,v1:192.168.123.109:6817/121770778] boot 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: osdmap e21: 8 total, 5 up, 8 in 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 
6}]: dispatch 2026-03-10T12:15:28.862 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: pgmap v33: 1 pgs: 1 unknown; 0 B data, 505 MiB used, 79 GiB / 80 GiB avail 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='osd.7 [v2:192.168.123.103:6826/3821814210,v1:192.168.123.103:6827/3821814210]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='osd.7 [v2:192.168.123.103:6826/3821814210,v1:192.168.123.103:6827/3821814210]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: osdmap e22: 8 total, 5 up, 8 in 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='osd.7 [v2:192.168.123.103:6826/3821814210,v1:192.168.123.103:6827/3821814210]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:28.863 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:28 vm09 ceph-mon[55914]: 
from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='osd.6 [v2:192.168.123.109:6824/120308281,v1:192.168.123.109:6825/120308281]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: osd.4 [v2:192.168.123.109:6816/121770778,v1:192.168.123.109:6817/121770778] boot 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: osdmap e21: 8 total, 5 up, 8 in 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: pgmap v33: 1 pgs: 1 unknown; 0 B data, 505 MiB used, 79 GiB / 80 GiB avail 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='osd.7 [v2:192.168.123.103:6826/3821814210,v1:192.168.123.103:6827/3821814210]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 
10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='osd.7 [v2:192.168.123.103:6826/3821814210,v1:192.168.123.103:6827/3821814210]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: osdmap e22: 8 total, 5 up, 8 in 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='osd.7 [v2:192.168.123.103:6826/3821814210,v1:192.168.123.103:6827/3821814210]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:28.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:28.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:28 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:29.008 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:29.369 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:29.447 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":22,"num_osds":8,"num_up_osds":5,"osd_up_since":1773144927,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:29.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: purged_snaps scrub starts 2026-03-10T12:15:29.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: purged_snaps scrub ok 2026-03-10T12:15:29.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:29.912 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:29.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: from='osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330]' entity='osd.5' 2026-03-10T12:15:29.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/66659476' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:29.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: from='osd.7 [v2:192.168.123.103:6826/3821814210,v1:192.168.123.103:6827/3821814210]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T12:15:29.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: osd.6 [v2:192.168.123.109:6824/120308281,v1:192.168.123.109:6825/120308281] boot 2026-03-10T12:15:29.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330] boot 2026-03-10T12:15:29.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: osdmap e23: 8 total, 7 up, 8 in 2026-03-10T12:15:29.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:29.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:29.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:29.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:29 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:30.004 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: purged_snaps scrub starts 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: purged_snaps scrub ok 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: from='osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330]' entity='osd.5' 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/66659476' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: from='osd.7 [v2:192.168.123.103:6826/3821814210,v1:192.168.123.103:6827/3821814210]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: osd.6 [v2:192.168.123.109:6824/120308281,v1:192.168.123.109:6825/120308281] boot 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: osd.5 [v2:192.168.123.103:6818/2820790330,v1:192.168.123.103:6819/2820790330] boot 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: osdmap e23: 8 total, 7 up, 8 in 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:30.005 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:29 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:30.447 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd stat -f json 2026-03-10T12:15:30.659 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:30.970 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:30.988 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: purged_snaps scrub starts 2026-03-10T12:15:30.989 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: purged_snaps scrub ok 2026-03-10T12:15:30.989 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:30.989 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:30.989 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: pgmap v36: 1 pgs: 1 unknown; 0 B data, 944 MiB used, 139 GiB / 140 GiB avail 2026-03-10T12:15:30.989 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:30.989 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:30.989 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-10T12:15:30.989 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: osd.7 [v2:192.168.123.103:6826/3821814210,v1:192.168.123.103:6827/3821814210] boot 2026-03-10T12:15:30.989 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: osdmap e24: 8 total, 8 up, 8 in 2026-03-10T12:15:30.989 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:30 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:31.033 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":24,"num_osds":8,"num_up_osds":8,"osd_up_since":1773144930,"num_in_osds":8,"osd_in_since":1773144912,"num_remapped_pgs":0} 2026-03-10T12:15:31.033 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd dump --format=json 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: purged_snaps scrub starts 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: purged_snaps scrub ok 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: pgmap v36: 1 pgs: 1 unknown; 0 B data, 944 MiB used, 139 GiB / 140 GiB avail 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: osd.7 [v2:192.168.123.103:6826/3821814210,v1:192.168.123.103:6827/3821814210] boot 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: osdmap e24: 8 total, 8 up, 8 in 2026-03-10T12:15:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:30 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T12:15:31.213 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:31.450 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:31.451 
INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":24,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","created":"2026-03-10T12:13:49.971783+0000","modified":"2026-03-10T12:15:30.675531+0000","last_up_change":"2026-03-10T12:15:30.675531+0000","last_in_change":"2026-03-10T12:15:12.724396+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":12,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T12:15:25.921432+0000","flags":32769,"flags_names":"hashpspool,creating","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"21","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"2576abe9-75cc-4a51-9524-332603ebd58e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6801","nonce":3454672783}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6803","nonce":3454672783}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6807","nonce":3454672783}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6805","nonce":3454672783}]},"public_addr":"192.168.123.109:6801/3454672783","cluster_addr":"192.168.123.109:6803/3454672783","heartbeat_back_addr":"192.168.123.109:6807/3454672783","heartbeat_front_addr":"192.168.123.109:6805/3454672783","state":["exists","up"]},{"osd":1,"uuid":"1d2fc1c1-1ac3-44fd-9261-4f96143ff0fe","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6803","nonce":939936633}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6805","nonce":939936633}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6809","nonce":939936633}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6807","nonce":939936633}]},"public_addr":"192.168.123.103:6803/939936633","cluster_addr":"192.168.123.103:6805/939936633","heartbeat_back_addr":"192.168.123.103:6809/939936633","heartbeat_front_addr":"192.168.123.103:6807/939936633","state":["exists","up"]},{"osd":2,"uuid":"374e23ad-59d8-444f-b159-b2c433156803","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":23,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6808","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6809","nonce":4239852056}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6810","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6811","nonce":4239852056}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6814","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6815","nonce":4239852056}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6812","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6813","nonce":4239852056}]},"public_addr":"192.168.123.109:6809/4239852056","cluster_addr":"192.168.123.109:6811/4239852056","heartbeat_back_addr":"192.168.123.109:6815/4239852056","heartbeat_front_addr":"192.168.123.109:6813/4239852056","state":["exists","up"]},{"osd":3,"uuid":"05172499-528c-465d-8c43-f72fbfac1e15","up":1,"in":1,"weight":1,"primary_affinity":1,"last_cl
ean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6811","nonce":1382074390}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6813","nonce":1382074390}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6817","nonce":1382074390}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6815","nonce":1382074390}]},"public_addr":"192.168.123.103:6811/1382074390","cluster_addr":"192.168.123.103:6813/1382074390","heartbeat_back_addr":"192.168.123.103:6817/1382074390","heartbeat_front_addr":"192.168.123.103:6815/1382074390","state":["exists","up"]},{"osd":4,"uuid":"8e7d2605-017a-4d07-8a22-9ba85d0546c0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6816","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6817","nonce":121770778}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6818","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6819","nonce":121770778}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6822","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6823","nonce":121770778}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6820","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6821","nonce":121770778}]},"public_addr":"192.168.123.109:6817/121770778","cluster_addr":"192.168.123.109:6819/121770778","heartbeat_back_addr":"192.168.123.109:6823/121770778","heartbeat_front_addr":"192.168.123.109:6821/121770778","state":["exists","up"]},{"osd":5,"uuid":"d9ddcc7d-9617-4105-ad17-b95f73239b61","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6819","nonce":2820790330}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6820","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6821","nonce":2820790330}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6825","nonce":2820790330}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6823","nonce":2820790330}]},"public_addr":"192.168.123.103:6819/2820790330","cluster_addr":"192.168.123.103:6821/2820790330","heartbeat_back_addr":"192.168.123.103:6825/2820790330","heartbeat_front_addr":"192.168.123.103:6823/2820790330","state":["exists","up"]},{"osd":6,"uuid":"5c158131-4975-403e-b6f9-a3a54ac07128","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6824","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6825","nonce":120308281}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6826","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6827","nonce":120
308281}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6830","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6831","nonce":120308281}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6828","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6829","nonce":120308281}]},"public_addr":"192.168.123.109:6825/120308281","cluster_addr":"192.168.123.109:6827/120308281","heartbeat_back_addr":"192.168.123.109:6831/120308281","heartbeat_front_addr":"192.168.123.109:6829/120308281","state":["exists","up"]},{"osd":7,"uuid":"ad731394-00f1-4d01-80a4-002c1d9bb9a2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6827","nonce":3821814210}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6829","nonce":3821814210}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6832","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6833","nonce":3821814210}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6831","nonce":3821814210}]},"public_addr":"192.168.123.103:6827/3821814210","cluster_addr":"192.168.123.103:6829/3821814210","heartbeat_back_addr":"192.168.123.103:6833/3821814210","heartbeat_front_addr":"192.168.123.103:6831/3821814210","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:21.659792+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:21.384187+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:23.728722+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:24.427729+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:25.435724+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:26.894189+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:27.788966+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"0.000000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:0/869626364":"2026-03-11T12:14:39.862327+0000","192.168.123.103:0/3512329745":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/3243700546":"2026-03-11T12:14:11.631812+0000","192.168.123.103:6800/3282769927":"2026-03-11T12:14:11.631812+0000","192
.168.123.103:6801/3175473488":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/622325455":"2026-03-11T12:14:39.862327+0000","192.168.123.103:0/105398361":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/1843122351":"2026-03-11T12:14:00.767263+0000","192.168.123.103:6801/3282769927":"2026-03-11T12:14:11.631812+0000","192.168.123.103:6800/3175473488":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/3675263848":"2026-03-11T12:14:11.631812+0000","192.168.123.103:0/3404900293":"2026-03-11T12:14:11.631812+0000","192.168.123.103:6800/1028681040":"2026-03-11T12:14:39.862327+0000","192.168.123.103:6801/1028681040":"2026-03-11T12:14:39.862327+0000","192.168.123.103:0/4067805577":"2026-03-11T12:14:39.862327+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T12:15:31.519 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-10T12:15:25.921432+0000', 'flags': 32769, 'flags_names': 'hashpspool,creating', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '21', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 7.889999866485596, 'score_stable': 7.889999866485596, 'optimal_score': 0.3799999952316284, 'raw_score_acting': 3, 'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-03-10T12:15:31.520 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd pool get .mgr pg_num 2026-03-10T12:15:31.698 
INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:31.945 INFO:teuthology.orchestra.run.vm03.stdout:pg_num: 1 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: purged_snaps scrub starts 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: purged_snaps scrub ok 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: Detected new or changed devices on vm09 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: Detected new or changed devices on vm03 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2163977436' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/1020794367' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T12:15:31.997 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:31 vm03 ceph-mon[47106]: osdmap e25: 8 total, 8 up, 8 in 2026-03-10T12:15:32.024 INFO:tasks.cephadm:Setting up client nodes... 
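The entries above show the teuthology ceph_manager polling the cluster through a cephadm shell (`ceph osd stat -f json`) until all eight OSDs report up and in (osdmap e24/e25), then checking `ceph osd dump --format=json` and the `.mgr` pool's pg_num before moving on to client setup. The following is a minimal, hypothetical Python sketch of that polling pattern, not the actual teuthology implementation; the cephadm path, container image tag, and fsid are copied from the commands logged above, and the helper names are illustrative only.

    import json
    import subprocess
    import time

    # Values taken from the cephadm invocations in the log above.
    CEPHADM = "/home/ubuntu/cephtest/cephadm"
    IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"
    FSID = "7444ff0e-1c7a-11f1-9305-473e10361f26"

    def ceph(*args: str) -> str:
        """Run a ceph CLI command inside a cephadm shell and return its stdout."""
        cmd = ["sudo", CEPHADM, "--image", IMAGE, "shell",
               "--fsid", FSID, "--", "ceph", *args]
        return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout

    def wait_for_all_osds_up(timeout: float = 300.0, interval: float = 1.0) -> None:
        """Poll 'ceph osd stat -f json' until every OSD is up, as seen in the log."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            stat = json.loads(ceph("osd", "stat", "-f", "json"))
            # e.g. {"epoch":24,"num_osds":8,"num_up_osds":8,...} once the cluster settles
            if stat["num_osds"] > 0 and stat["num_up_osds"] == stat["num_osds"]:
                return
            time.sleep(interval)
        raise TimeoutError("not all OSDs came up within %.0fs" % timeout)

    if __name__ == "__main__":
        wait_for_all_osds_up()

Once the OSD count matches, the log proceeds to "Setting up client nodes...", creating client.0 and client.1 keys with `ceph auth get-or-create` and installing them under /etc/ceph on each host.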
2026-03-10T12:15:32.024 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: purged_snaps scrub starts 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: purged_snaps scrub ok 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: Detected new or changed devices on vm09 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: Detected new or changed devices on vm03 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/2163977436' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/1020794367' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T12:15:32.214 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:31 vm09 ceph-mon[55914]: osdmap e25: 8 total, 8 up, 8 in 2026-03-10T12:15:32.228 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:32.259 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 sudo[74694]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-03-10T12:15:32.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 sudo[74694]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-10T12:15:32.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 sudo[74694]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-10T12:15:32.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 sudo[74694]: pam_unix(sudo:session): session closed for user root 2026-03-10T12:15:32.512 INFO:teuthology.orchestra.run.vm03.stdout:[client.0] 2026-03-10T12:15:32.513 INFO:teuthology.orchestra.run.vm03.stdout: key = AQBkC7BpmCXvHRAAKWsyfiV1vX4uLGGLKVMKPQ== 2026-03-10T12:15:32.583 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-10T12:15:32.583 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-03-10T12:15:32.583 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-03-10T12:15:32.619 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-10T12:15:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 sudo[68434]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-03-10T12:15:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 sudo[68434]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-10T12:15:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 sudo[68434]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-10T12:15:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 sudo[68434]: pam_unix(sudo:session): session closed for user root 2026-03-10T12:15:32.800 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm09/config 2026-03-10T12:15:33.079 INFO:teuthology.orchestra.run.vm09.stdout:[client.1] 2026-03-10T12:15:33.079 INFO:teuthology.orchestra.run.vm09.stdout: key = AQBlC7BpBB8sBBAA0hwaHgpppHG2uIHrbU5W8Q== 2026-03-10T12:15:33.130 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-10T12:15:33.130 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.client.1.keyring 2026-03-10T12:15:33.130 DEBUG:teuthology.orchestra.run.vm09:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: pgmap v39: 1 pgs: 1 unknown; 0 B data, 971 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/737726479' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/2371768410' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/2371768410' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T12:15:33.193 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:32 vm09 ceph-mon[55914]: osdmap e26: 8 total, 8 up, 8 in 2026-03-10T12:15:33.208 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 2026-03-10T12:15:33.208 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-10T12:15:33.208 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mgr dump --format=json 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: pgmap v39: 1 pgs: 1 unknown; 0 B data, 971 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/737726479' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2371768410' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/2371768410' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T12:15:33.229 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:32 vm03 ceph-mon[47106]: osdmap e26: 8 total, 8 up, 8 in 2026-03-10T12:15:33.382 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:33.622 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:33.683 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":18,"flags":0,"active_gid":14217,"active_name":"vm03.oxmxtj","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":2478334297},{"type":"v1","addr":"192.168.123.103:6801","nonce":2478334297}]},"active_addr":"192.168.123.103:6801/2478334297","active_change":"2026-03-10T12:14:39.862574+0000","active_mgr_features":4540701547738038271,"available":true,"standbys":[{"gid":14242,"name":"vm09.pftowo","mgr_features":4540701547738038271,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope 
sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health 
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = 
Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger 
collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.103:8443/","prometheus":"http://192.168.123.103:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":5,"active_clients":[{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":3359196597}]},{"name":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":2854901130}]},{"nam
e":"volumes","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":872988782}]}]} 2026-03-10T12:15:33.684 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-10T12:15:33.684 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-10T12:15:33.684 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd dump --format=json 2026-03-10T12:15:33.890 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:34.122 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:34.123 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":26,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","created":"2026-03-10T12:13:49.971783+0000","modified":"2026-03-10T12:15:32.685468+0000","last_up_change":"2026-03-10T12:15:30.675531+0000","last_in_change":"2026-03-10T12:15:12.724396+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":12,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T12:15:25.921432+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"26","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"2576abe9-75cc-4a51-9524-332603ebd58e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6801","nonce":3454672783}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6803","nonce":3454672783}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6807","nonce":3454672783}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6805","nonce":3454672783}]},"public_addr":"192.168.123.109:6801/3454672783","cluster_addr":"192.168.123.109:6803/3454672783","heartbeat_back_addr":"192.168.123.109:6807/3454672783","heartbeat_front_addr":"192.168.123.109:6805/3454672783","state":["exists","up"]},{"osd":1,"uuid":"1d2fc1c1-1ac3-44fd-9261-4f96143ff0fe","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6803","nonce":939936633}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6805","nonce":939936633}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6809","nonce":939936633}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6807","nonce":939936633}]},"public_addr":"192.168.123.103:6803/939936633","cluster_addr":"192.168.123.103:6805/939936633","heartbeat_back_addr":"192.168.123.103:6809/939936633","heartbeat_front_addr":"192.168.123.103:6807/939936633","state":["exists","up"]},{"osd":2,"uuid":"374e23ad-59d8-444f-b159-b2c433156803","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":23,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6808","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6809","nonce":4239852056}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6810","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6811","nonce":4239852056}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6814","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6815","nonce":4239852056}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6812","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6813","nonce":4239852056}]},"public_addr":"192.168.123.109:6809/4239852056","cluster_addr":"192.168.123.109:6811/4239852056","heartbeat_back_addr":"192.168.123.109:6815/4239852056","heartbeat_front_addr":"192.168.123.109:6813/4239852056","state":["exists","up"]},{"osd":3,"uuid":"05172499-528c-465d-8c43-f72fbfac1e15","up":1,"in":1,"weight":1,"primary_affinity":1,"last_cl
ean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6811","nonce":1382074390}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6813","nonce":1382074390}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6817","nonce":1382074390}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6815","nonce":1382074390}]},"public_addr":"192.168.123.103:6811/1382074390","cluster_addr":"192.168.123.103:6813/1382074390","heartbeat_back_addr":"192.168.123.103:6817/1382074390","heartbeat_front_addr":"192.168.123.103:6815/1382074390","state":["exists","up"]},{"osd":4,"uuid":"8e7d2605-017a-4d07-8a22-9ba85d0546c0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6816","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6817","nonce":121770778}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6818","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6819","nonce":121770778}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6822","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6823","nonce":121770778}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6820","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6821","nonce":121770778}]},"public_addr":"192.168.123.109:6817/121770778","cluster_addr":"192.168.123.109:6819/121770778","heartbeat_back_addr":"192.168.123.109:6823/121770778","heartbeat_front_addr":"192.168.123.109:6821/121770778","state":["exists","up"]},{"osd":5,"uuid":"d9ddcc7d-9617-4105-ad17-b95f73239b61","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6819","nonce":2820790330}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6820","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6821","nonce":2820790330}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6825","nonce":2820790330}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6823","nonce":2820790330}]},"public_addr":"192.168.123.103:6819/2820790330","cluster_addr":"192.168.123.103:6821/2820790330","heartbeat_back_addr":"192.168.123.103:6825/2820790330","heartbeat_front_addr":"192.168.123.103:6823/2820790330","state":["exists","up"]},{"osd":6,"uuid":"5c158131-4975-403e-b6f9-a3a54ac07128","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6824","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6825","nonce":120308281}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6826","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6827","nonce":12
0308281}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6830","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6831","nonce":120308281}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6828","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6829","nonce":120308281}]},"public_addr":"192.168.123.109:6825/120308281","cluster_addr":"192.168.123.109:6827/120308281","heartbeat_back_addr":"192.168.123.109:6831/120308281","heartbeat_front_addr":"192.168.123.109:6829/120308281","state":["exists","up"]},{"osd":7,"uuid":"ad731394-00f1-4d01-80a4-002c1d9bb9a2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6827","nonce":3821814210}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6829","nonce":3821814210}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6832","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6833","nonce":3821814210}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6831","nonce":3821814210}]},"public_addr":"192.168.123.103:6827/3821814210","cluster_addr":"192.168.123.103:6829/3821814210","heartbeat_back_addr":"192.168.123.103:6833/3821814210","heartbeat_front_addr":"192.168.123.103:6831/3821814210","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:21.659792+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:21.384187+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:23.728722+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:24.427729+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:25.435724+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:26.894189+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:27.788966+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:29.067111+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:0/869626364":"2026-03-11T12:14:39.862327+0000","192.168.123.103:0/3512329745":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/3243700546":"2026-03-11T12:14:11.631812+0000","192.168.123.103:6800/3282769927":"2026-03-11T12
:14:11.631812+0000","192.168.123.103:6801/3175473488":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/622325455":"2026-03-11T12:14:39.862327+0000","192.168.123.103:0/105398361":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/1843122351":"2026-03-11T12:14:00.767263+0000","192.168.123.103:6801/3282769927":"2026-03-11T12:14:11.631812+0000","192.168.123.103:6800/3175473488":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/3675263848":"2026-03-11T12:14:11.631812+0000","192.168.123.103:0/3404900293":"2026-03-11T12:14:11.631812+0000","192.168.123.103:6800/1028681040":"2026-03-11T12:14:39.862327+0000","192.168.123.103:6801/1028681040":"2026-03-11T12:14:39.862327+0000","192.168.123.103:0/4067805577":"2026-03-11T12:14:39.862327+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T12:15:34.182 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:33 vm03 ceph-mon[47106]: from='client.? 192.168.123.109:0/916725306' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T12:15:34.182 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:33 vm03 ceph-mon[47106]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T12:15:34.182 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:33 vm03 ceph-mon[47106]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T12:15:34.182 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:33 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/683845467' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T12:15:34.182 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-03-10T12:15:34.183 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd dump --format=json 2026-03-10T12:15:34.348 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:33 vm09 ceph-mon[55914]: from='client.? 192.168.123.109:0/916725306' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T12:15:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:33 vm09 ceph-mon[55914]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T12:15:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:33 vm09 ceph-mon[55914]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T12:15:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:33 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/683845467' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T12:15:34.563 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:34.563 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":26,"fsid":"7444ff0e-1c7a-11f1-9305-473e10361f26","created":"2026-03-10T12:13:49.971783+0000","modified":"2026-03-10T12:15:32.685468+0000","last_up_change":"2026-03-10T12:15:30.675531+0000","last_in_change":"2026-03-10T12:15:12.724396+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":12,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T12:15:25.921432+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"26","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"2576abe9-75cc-4a51-9524-332603ebd58e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6801","nonce":3454672783}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6803","nonce":3454672783}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6807","nonce":3454672783}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":3454672783},{"type":"v1","addr":"192.168.123.109:6805","nonce":3454672783}]},"public_addr":"192.168.123.109:6801/3454672783","cluster_addr":"192.168.123.109:6803/3454672783","heartbeat_back_addr":"192.168.123.109:6807/3454672783","heartbeat_front_addr":"192.168.123.109:6805/3454672783","state":["exists","up"]},{"osd":1,"uuid":"1d2fc1c1-1ac3-44fd-9261-4f96143ff0fe","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6803","nonce":939936633}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6805","nonce":939936633}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6809","nonce":939936633}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":939936633},{"type":"v1","addr":"192.168.123.103:6807","nonce":939936633}]},"public_addr":"192.168.123.103:6803/939936633","cluster_addr":"192.168.123.103:6805/939936633","heartbeat_back_addr":"192.168.123.103:6809/939936633","heartbeat_front_addr":"192.168.123.103:6807/939936633","state":["exists","up"]},{"osd":2,"uuid":"374e23ad-59d8-444f-b159-b2c433156803","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":23,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6808","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6809","nonce":4239852056}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6810","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6811","nonce":4239852056}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6814","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6815","nonce":4239852056}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6812","nonce":4239852056},{"type":"v1","addr":"192.168.123.109:6813","nonce":4239852056}]},"public_addr":"192.168.123.109:6809/4239852056","cluster_addr":"192.168.123.109:6811/4239852056","heartbeat_back_addr":"192.168.123.109:6815/4239852056","heartbeat_front_addr":"192.168.123.109:6813/4239852056","state":["exists","up"]},{"osd":3,"uuid":"05172499-528c-465d-8c43-f72fbfac1e15","up":1,"in":1,"weight":1,"primary_affinity":1,"last_cl
ean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6811","nonce":1382074390}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6813","nonce":1382074390}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6817","nonce":1382074390}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":1382074390},{"type":"v1","addr":"192.168.123.103:6815","nonce":1382074390}]},"public_addr":"192.168.123.103:6811/1382074390","cluster_addr":"192.168.123.103:6813/1382074390","heartbeat_back_addr":"192.168.123.103:6817/1382074390","heartbeat_front_addr":"192.168.123.103:6815/1382074390","state":["exists","up"]},{"osd":4,"uuid":"8e7d2605-017a-4d07-8a22-9ba85d0546c0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6816","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6817","nonce":121770778}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6818","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6819","nonce":121770778}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6822","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6823","nonce":121770778}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6820","nonce":121770778},{"type":"v1","addr":"192.168.123.109:6821","nonce":121770778}]},"public_addr":"192.168.123.109:6817/121770778","cluster_addr":"192.168.123.109:6819/121770778","heartbeat_back_addr":"192.168.123.109:6823/121770778","heartbeat_front_addr":"192.168.123.109:6821/121770778","state":["exists","up"]},{"osd":5,"uuid":"d9ddcc7d-9617-4105-ad17-b95f73239b61","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6819","nonce":2820790330}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6820","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6821","nonce":2820790330}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6825","nonce":2820790330}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":2820790330},{"type":"v1","addr":"192.168.123.103:6823","nonce":2820790330}]},"public_addr":"192.168.123.103:6819/2820790330","cluster_addr":"192.168.123.103:6821/2820790330","heartbeat_back_addr":"192.168.123.103:6825/2820790330","heartbeat_front_addr":"192.168.123.103:6823/2820790330","state":["exists","up"]},{"osd":6,"uuid":"5c158131-4975-403e-b6f9-a3a54ac07128","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6824","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6825","nonce":120308281}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6826","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6827","nonce":12
0308281}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6830","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6831","nonce":120308281}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6828","nonce":120308281},{"type":"v1","addr":"192.168.123.109:6829","nonce":120308281}]},"public_addr":"192.168.123.109:6825/120308281","cluster_addr":"192.168.123.109:6827/120308281","heartbeat_back_addr":"192.168.123.109:6831/120308281","heartbeat_front_addr":"192.168.123.109:6829/120308281","state":["exists","up"]},{"osd":7,"uuid":"ad731394-00f1-4d01-80a4-002c1d9bb9a2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6827","nonce":3821814210}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6829","nonce":3821814210}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6832","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6833","nonce":3821814210}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":3821814210},{"type":"v1","addr":"192.168.123.103:6831","nonce":3821814210}]},"public_addr":"192.168.123.103:6827/3821814210","cluster_addr":"192.168.123.103:6829/3821814210","heartbeat_back_addr":"192.168.123.103:6833/3821814210","heartbeat_front_addr":"192.168.123.103:6831/3821814210","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:21.659792+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:21.384187+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:23.728722+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:24.427729+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:25.435724+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:26.894189+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:27.788966+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T12:15:29.067111+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:0/869626364":"2026-03-11T12:14:39.862327+0000","192.168.123.103:0/3512329745":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/3243700546":"2026-03-11T12:14:11.631812+0000","192.168.123.103:6800/3282769927":"2026-03-11T12
:14:11.631812+0000","192.168.123.103:6801/3175473488":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/622325455":"2026-03-11T12:14:39.862327+0000","192.168.123.103:0/105398361":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/1843122351":"2026-03-11T12:14:00.767263+0000","192.168.123.103:6801/3282769927":"2026-03-11T12:14:11.631812+0000","192.168.123.103:6800/3175473488":"2026-03-11T12:14:00.767263+0000","192.168.123.103:0/3675263848":"2026-03-11T12:14:11.631812+0000","192.168.123.103:0/3404900293":"2026-03-11T12:14:11.631812+0000","192.168.123.103:6800/1028681040":"2026-03-11T12:14:39.862327+0000","192.168.123.103:6801/1028681040":"2026-03-11T12:14:39.862327+0000","192.168.123.103:0/4067805577":"2026-03-11T12:14:39.862327+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T12:15:34.615 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph tell osd.0 flush_pg_stats 2026-03-10T12:15:34.615 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph tell osd.1 flush_pg_stats 2026-03-10T12:15:34.615 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph tell osd.2 flush_pg_stats 2026-03-10T12:15:34.615 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph tell osd.3 flush_pg_stats 2026-03-10T12:15:34.615 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph tell osd.4 flush_pg_stats 2026-03-10T12:15:34.616 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph tell osd.5 flush_pg_stats 2026-03-10T12:15:34.616 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph tell osd.6 flush_pg_stats 2026-03-10T12:15:34.616 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph tell osd.7 flush_pg_stats 2026-03-10T12:15:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:34 vm03 ceph-mon[47106]: pgmap v41: 1 pgs: 1 unknown; 0 B data, 571 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:34 vm03 ceph-mon[47106]: mgrmap e19: vm03.oxmxtj(active, since 54s), standbys: 
vm09.pftowo 2026-03-10T12:15:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:34 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/109083853' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T12:15:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:34 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2964897710' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T12:15:35.262 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:35.287 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:35.343 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:35.345 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:34 vm09 ceph-mon[55914]: pgmap v41: 1 pgs: 1 unknown; 0 B data, 571 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:34 vm09 ceph-mon[55914]: mgrmap e19: vm03.oxmxtj(active, since 54s), standbys: vm09.pftowo 2026-03-10T12:15:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:34 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/109083853' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T12:15:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:34 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/2964897710' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T12:15:35.399 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:35.546 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:35.564 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:35.581 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:35.788 INFO:teuthology.orchestra.run.vm03.stdout:73014444036 2026-03-10T12:15:35.788 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd last-stat-seq osd.1 2026-03-10T12:15:35.947 INFO:teuthology.orchestra.run.vm03.stdout:98784247811 2026-03-10T12:15:35.947 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd last-stat-seq osd.6 2026-03-10T12:15:36.084 INFO:teuthology.orchestra.run.vm03.stdout:81604378628 2026-03-10T12:15:36.084 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd last-stat-seq osd.3 2026-03-10T12:15:36.338 INFO:teuthology.orchestra.run.vm03.stdout:98784247811 
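The exchange around these lines shows the pattern teuthology's ceph manager uses to confirm the monitor has absorbed fresh PG stats before declaring the cluster healthy: `ceph tell osd.N flush_pg_stats` returns a stat sequence number, and `ceph osd last-stat-seq osd.N` is then polled until the monitor reports a value at least that large (the later "need seq X got Y" lines, including the retry for osd.5). The following is a minimal illustrative sketch of that wait loop, not the teuthology source; the `run_ceph` helper, retry interval, and timeout are assumptions, while the cephadm invocation mirrors the commands logged here.

```python
import subprocess
import time

FSID = "7444ff0e-1c7a-11f1-9305-473e10361f26"
IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"

def run_ceph(*args):
    # Illustrative helper: run a ceph command inside a cephadm shell, as in the
    # "sudo /home/ubuntu/cephtest/cephadm ... shell --fsid ... -- ceph ..." lines above.
    cmd = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
           "shell", "--fsid", FSID, "--", "ceph", *args]
    return subprocess.check_output(cmd, text=True).strip()

def flush_pg_stats(osd_ids, timeout=300):
    """Flush PG stats on each OSD, then wait until the mon has seen that sequence."""
    # flush_pg_stats prints the stat sequence number the OSD just published
    need = {osd: int(run_ceph("tell", f"osd.{osd}", "flush_pg_stats"))
            for osd in osd_ids}
    deadline = time.time() + timeout
    for osd, seq in need.items():
        while True:
            got = int(run_ceph("osd", "last-stat-seq", f"osd.{osd}"))
            if got >= seq:  # corresponds to the "need seq X got Y for osd.N" log lines
                break
            if time.time() > deadline:
                raise RuntimeError(f"osd.{osd} stats never reached seq {seq}")
            time.sleep(1)  # retry, as seen for osd.5 (got 98784247810, needed ...811)

flush_pg_stats(range(8))
```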
2026-03-10T12:15:36.338 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd last-stat-seq osd.5 2026-03-10T12:15:36.344 INFO:teuthology.orchestra.run.vm03.stdout:81604378627 2026-03-10T12:15:36.344 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd last-stat-seq osd.2 2026-03-10T12:15:36.355 INFO:teuthology.orchestra.run.vm03.stdout:103079215106 2026-03-10T12:15:36.355 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd last-stat-seq osd.7 2026-03-10T12:15:36.365 INFO:teuthology.orchestra.run.vm03.stdout:73014444036 2026-03-10T12:15:36.365 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd last-stat-seq osd.0 2026-03-10T12:15:36.406 INFO:teuthology.orchestra.run.vm03.stdout:90194313219 2026-03-10T12:15:36.407 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd last-stat-seq osd.4 2026-03-10T12:15:36.436 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:36.823 INFO:teuthology.orchestra.run.vm03.stdout:73014444036 2026-03-10T12:15:36.969 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444036 got 73014444036 for osd.1 2026-03-10T12:15:36.970 DEBUG:teuthology.parallel:result is None 2026-03-10T12:15:36.988 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:37.001 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:37.084 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:37.097 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:36 vm03 ceph-mon[47106]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:37.097 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:36 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/689484254' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T12:15:37.228 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:36 vm09 ceph-mon[55914]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:36 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/689484254' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T12:15:37.407 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:37.489 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:37.505 INFO:teuthology.orchestra.run.vm03.stdout:98784247811 2026-03-10T12:15:37.518 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:37.685 INFO:teuthology.orchestra.run.vm03.stdout:81604378628 2026-03-10T12:15:37.732 INFO:tasks.cephadm.ceph_manager.ceph:need seq 98784247811 got 98784247811 for osd.6 2026-03-10T12:15:37.732 DEBUG:teuthology.parallel:result is None 2026-03-10T12:15:37.834 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378628 got 81604378628 for osd.3 2026-03-10T12:15:37.834 DEBUG:teuthology.parallel:result is None 2026-03-10T12:15:37.886 INFO:teuthology.orchestra.run.vm03.stdout:98784247810 2026-03-10T12:15:38.003 INFO:tasks.cephadm.ceph_manager.ceph:need seq 98784247811 got 98784247810 for osd.5 2026-03-10T12:15:38.028 INFO:teuthology.orchestra.run.vm03.stdout:73014444036 2026-03-10T12:15:38.062 INFO:teuthology.orchestra.run.vm03.stdout:90194313219 2026-03-10T12:15:38.082 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444036 got 73014444036 for osd.0 2026-03-10T12:15:38.083 DEBUG:teuthology.parallel:result is None 2026-03-10T12:15:38.145 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:37 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/467738233' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T12:15:38.145 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:37 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2872635961' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T12:15:38.145 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:37 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/2492423051' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T12:15:38.172 INFO:teuthology.orchestra.run.vm03.stdout:103079215107 2026-03-10T12:15:38.199 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313219 got 90194313219 for osd.4 2026-03-10T12:15:38.199 DEBUG:teuthology.parallel:result is None 2026-03-10T12:15:38.228 INFO:teuthology.orchestra.run.vm03.stdout:81604378628 2026-03-10T12:15:38.228 INFO:tasks.cephadm.ceph_manager.ceph:need seq 103079215106 got 103079215107 for osd.7 2026-03-10T12:15:38.228 DEBUG:teuthology.parallel:result is None 2026-03-10T12:15:38.272 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378627 got 81604378628 for osd.2 2026-03-10T12:15:38.272 DEBUG:teuthology.parallel:result is None 2026-03-10T12:15:38.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:37 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/467738233' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T12:15:38.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:37 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/2872635961' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T12:15:38.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:37 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/2492423051' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T12:15:39.004 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph osd last-stat-seq osd.5 2026-03-10T12:15:39.174 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:39.196 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:38 vm03 ceph-mon[47106]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:39.196 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:38 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/1958763490' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T12:15:39.196 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:38 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/155735182' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T12:15:39.196 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:38 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/3769010988' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T12:15:39.196 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:38 vm03 ceph-mon[47106]: from='client.? 192.168.123.103:0/1953099636' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T12:15:39.387 INFO:teuthology.orchestra.run.vm03.stdout:98784247811 2026-03-10T12:15:39.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:38 vm09 ceph-mon[55914]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:39.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:38 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/1958763490' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T12:15:39.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:38 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/155735182' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T12:15:39.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:38 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/3769010988' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T12:15:39.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:38 vm09 ceph-mon[55914]: from='client.? 
192.168.123.103:0/1953099636' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T12:15:39.437 INFO:tasks.cephadm.ceph_manager.ceph:need seq 98784247811 got 98784247811 for osd.5 2026-03-10T12:15:39.438 DEBUG:teuthology.parallel:result is None 2026-03-10T12:15:39.438 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-10T12:15:39.438 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph pg dump --format=json 2026-03-10T12:15:39.598 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:39.807 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:39.807 INFO:teuthology.orchestra.run.vm03.stderr:dumped all 2026-03-10T12:15:39.852 INFO:teuthology.orchestra.run.vm03.stdout:{"pg_ready":true,"pg_map":{"version":43,"stamp":"2026-03-10T12:15:37.877078+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":628052,"kb_used_data":3308,"kb_used_omap":12,"kb_used_meta":214515,"kb_avail":167111340,"statfs":{"total":171765137408,"available":171122012160,"internally_reserved":0,"allocated":3387392,"data_stored":2126576,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":12711,"internal_metadata":219663961},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0
,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"2.000253"},"pg_stats":[{"pgid":"1.0","version":"25'32","reported_seq":57,"reported_epoch":26,"state":"active+clean","last_fresh":"2026-03-10T12:15:33.740552+0000","last_change":"2026-03-10T12:15:31.898898+0000","last_active":"2026-03-10T12:15:33.740552+0000","last_peered":"2026-03-10T12:15:33.740552+0000","last_clean":"2026-03-10T12:15:33.740552+0000","last_became_active":"2026-03-10T12:15:31.897832+0000","last_became_peered":"2026-03-10T12:15:31.897832+0000","last_unstale":"2026-03-10T12:15:33.740552+0000","last_undegraded":"2026-03-10T12:15:33.740552+0000","last_fullsized":"2026-03-10T12:15:33.740552+0000","mapping_epoch":24,"log_start":"0'0","ondisk_log_start":"0'0","created":20,"last_epoch_clean":25,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T12:15:26.614629+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T12:15:26.614629+0000","last_clean_scrub_stamp":"2026-03-10T12:15:26.614629+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-03-11T16:40:11.506589+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[6,5,2],"acting":[6,5,2],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":6,"acting_primary":6,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1388544,"data_stored":1377840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":24,"seq":103079215107,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27084,"kb_used_data":244,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940340,"statfs":{"total":21470642176,"available":21442908160,"internally_reserved":0,"allocated":249856,"data_stored":93592,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1585,"internal_metadata":27457999},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":23,"seq":98784247811,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27668,"kb_used_data":696,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939756,"statfs":{"total":21470642176,"available":21442310144,"internally_reserved":0,"allocated":712704,"data_stored":552872,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated
":1589,"internal_metadata":27457995},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":23,"seq":98784247811,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":437272,"kb_used_data":696,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20530152,"statfs":{"total":21470642176,"available":21022875648,"internally_reserved":0,"allocated":712704,"data_stored":552872,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":21,"seq":90194313219,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27088,"kb_used_data":244,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940336,"statfs":{"total":21470642176,"available":21442904064,"internally_reserved":0,"allocated":249856,"data_stored":93592,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":19,"seq":81604378628,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27664,"kb_used_data":696,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939760,"statfs":{"total":21470642176,"available":21442314240,"internally_reserved":0,"allocated":712704,"data_stored":552872,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":19,"seq":81604378628,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27092,"kb_used_data":244,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940332,"statfs":{"total":21470642176,"available":21442899968,"internally_reserved":0,"allocated":249856,"data_stored":93592,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":17,"seq":73014444036,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27092,"kb_used_data":244,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940332,"statfs":{"total":21470642176,"available":21442899968,"internally_reserved":0,"allocated":249856,"data_
stored":93592,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":17,"seq":73014444036,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27092,"kb_used_data":244,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940332,"statfs":{"total":21470642176,"available":21442899968,"internally_reserved":0,"allocated":249856,"data_stored":93592,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T12:15:39.853 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph pg dump --format=json 2026-03-10T12:15:40.030 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:40.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:39 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/2413486353' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T12:15:40.055 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:39 vm03 ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:15:40.244 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:40.244 INFO:teuthology.orchestra.run.vm03.stderr:dumped all 2026-03-10T12:15:40.284 INFO:teuthology.orchestra.run.vm03.stdout:{"pg_ready":true,"pg_map":{"version":44,"stamp":"2026-03-10T12:15:39.877315+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":628052,"kb_used_data":3308,"kb_used_omap":12,"kb_used_meta":214515,"kb_avail":167111340,"statfs":{"total":171765137408,"available":171122012160,"internally_reserved":0,"allocated":3387392,"data_stored":2126576,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":12711,"internal_metadata":219663961},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"
total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"4.000490"},"pg_stats":[{"pgid":"1.0","version":"25'32","reported_seq":57,"reported_epoch":26,"state":"active+clean","last_fresh":"2026-03-10T12:15:33.740552+0000","last_change":"2026-03-10T12:15:31.898898+0000","last_active":"2026-03-10T12:15:33.740552+0000","last_peered":"2026-03-10T12:15:33.740552+0000","last_clean":"2026-03-10T12:15:33.740552+0000","last_became_active":"2026-03-10T12:15:31.897832+0000","last_became_peered":"2026-03-10T12:15:31.897832+0000","last_unstale":"2026-03-10T12:15:33.740552+0000","last_undegraded":"2026-03-10T12:15:33.740552+0000","last_fullsized":"2026-03-10T12:15:33.740552+0000","mapping_epoch":24,"log_start":"0'0","ondisk_log_start":"0'0","created":20,"last_epoch_clean":25,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T12:15:26.614629+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T12:15:26.614629+0000","last_clean_scrub_stamp":"2026-03-10T12:15:26.614629+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-11T16:40:11.506589+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[6,5,2],"acting":[6,5,2],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":6,"acting_primary":6,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_s
napsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1388544,"data_stored":1377840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":24,"seq":103079215107,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27084,"kb_used_data":244,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940340,"statfs":{"total":21470642176,"available":21442908160,"internally_reserved":0,"allocated":249856,"data_stored":93592,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1585,"internal_metadata":27457999},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":23,"seq":98784247812,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27668,"kb_used_data":696,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939756,"statfs":{"total":21470642176,"available":21442310144,"internally_reserved":0,"allocated":712704,"data_stored":552872,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":23,"seq":98784247812,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":437272,"kb_used_data":696,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20530152,"statfs":{"total":21470642176,"available":21022875648,"internally_reserved":0,"allocated":712704,"data_stored":552872,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":21,"seq":90194313220,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27088,"kb_used_data":244,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940336,"statfs":{"total":21470642176,"available":21442904064,"internally_reserved":0,"allocated":249856,"data_stored":93592,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":19,"seq":81604378628,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27664,"kb_used_data":696,"kb_used_omap":1,"kb
_used_meta":26814,"kb_avail":20939760,"statfs":{"total":21470642176,"available":21442314240,"internally_reserved":0,"allocated":712704,"data_stored":552872,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":19,"seq":81604378628,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27092,"kb_used_data":244,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940332,"statfs":{"total":21470642176,"available":21442899968,"internally_reserved":0,"allocated":249856,"data_stored":93592,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":17,"seq":73014444037,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27092,"kb_used_data":244,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940332,"statfs":{"total":21470642176,"available":21442899968,"internally_reserved":0,"allocated":249856,"data_stored":93592,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":17,"seq":73014444037,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27092,"kb_used_data":244,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940332,"statfs":{"total":21470642176,"available":21442899968,"internally_reserved":0,"allocated":249856,"data_stored":93592,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T12:15:40.284 INFO:tasks.cephadm.ceph_manager.ceph:clean! 
2026-03-10T12:15:40.284 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-10T12:15:40.284 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-10T12:15:40.284 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph health --format=json 2026-03-10T12:15:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:39 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/2413486353' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T12:15:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:39 vm09 ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:15:40.450 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:40.680 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:40.680 INFO:teuthology.orchestra.run.vm03.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-10T12:15:40.726 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-10T12:15:40.726 INFO:tasks.cephadm:Setup complete, yielding 2026-03-10T12:15:40.726 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T12:15:40.728 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-10T12:15:40.728 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch status' 2026-03-10T12:15:40.886 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:41.097 INFO:teuthology.orchestra.run.vm03.stdout:Backend: cephadm 2026-03-10T12:15:41.097 INFO:teuthology.orchestra.run.vm03.stdout:Available: Yes 2026-03-10T12:15:41.097 INFO:teuthology.orchestra.run.vm03.stdout:Paused: No 2026-03-10T12:15:41.158 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:40 vm03 ceph-mon[47106]: from='client.14526 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:41.158 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:40 vm03 ceph-mon[47106]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:41.158 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:40 vm03 ceph-mon[47106]: from='client.? 
192.168.123.103:0/348558633' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T12:15:41.159 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch ps' 2026-03-10T12:15:41.321 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:41.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:40 vm09 ceph-mon[55914]: from='client.14526 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:41.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:40 vm09 ceph-mon[55914]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:41.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:40 vm09 ceph-mon[55914]: from='client.? 192.168.123.103:0/348558633' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:alertmanager.vm03 vm03 *:9093,9094 running (44s) 11s ago 77s 20.8M - 0.25.0 c8568f914cd2 55c3a7e1ea2e 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter.vm03 vm03 *:9926 running (84s) 11s ago 84s 8686k - 19.2.3-678-ge911bdeb 654f31e6858e 35efa696e1b7 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter.vm09 vm09 *:9926 running (57s) 12s ago 57s 6559k - 19.2.3-678-ge911bdeb 654f31e6858e 996319fa5e1a 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:crash.vm03 vm03 running (83s) 11s ago 83s 7612k - 19.2.3-678-ge911bdeb 654f31e6858e 8ec21e01bd5e 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:crash.vm09 vm09 running (56s) 12s ago 56s 7612k - 19.2.3-678-ge911bdeb 654f31e6858e 1c1b7bfaa8f6 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:grafana.vm03 vm03 *:3000 running (43s) 11s ago 71s 80.6M - 10.4.0 c8b91775d855 2640e77d8ce9 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:mgr.vm03.oxmxtj vm03 *:9283,8765,8443 running (110s) 11s ago 109s 545M - 19.2.3-678-ge911bdeb 654f31e6858e 0fc4978a7cf9 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:mgr.vm09.pftowo vm09 *:8443,9283,8765 running (53s) 12s ago 53s 488M - 19.2.3-678-ge911bdeb 654f31e6858e 2008d1c4f3e8 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:mon.vm03 vm03 running (110s) 11s ago 111s 47.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e e2f02b4b2305 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:mon.vm09 vm09 running (52s) 12s ago 52s 42.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 71f1d5b5c98c 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter.vm03 vm03 *:9100 running (80s) 11s ago 80s 9118k - 1.7.0 72c9c2088986 60adf9e9ea4d 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter.vm09 vm09 *:9100 running (54s) 12s ago 54s 9080k - 1.7.0 72c9c2088986 5dcad22f7bd1 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:osd.0 vm09 running (23s) 12s ago 23s 29.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 
e9c4c6f3e676 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:osd.1 vm03 running (22s) 11s ago 22s 52.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 11ac912e0f2c 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:osd.2 vm09 running (20s) 12s ago 20s 32.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e fb910104e413 2026-03-10T12:15:41.548 INFO:teuthology.orchestra.run.vm03.stdout:osd.3 vm03 running (20s) 11s ago 20s 30.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 36f5c304c0ef 2026-03-10T12:15:41.549 INFO:teuthology.orchestra.run.vm03.stdout:osd.4 vm09 running (18s) 12s ago 18s 53.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d9e3c8311bd5 2026-03-10T12:15:41.549 INFO:teuthology.orchestra.run.vm03.stdout:osd.5 vm03 running (17s) 11s ago 17s 36.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e c9971542bd3a 2026-03-10T12:15:41.549 INFO:teuthology.orchestra.run.vm03.stdout:osd.6 vm09 running (16s) 12s ago 16s 24.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d6eae2d41cbd 2026-03-10T12:15:41.549 INFO:teuthology.orchestra.run.vm03.stdout:osd.7 vm03 running (14s) 11s ago 14s 26.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 113dcc0c2e61 2026-03-10T12:15:41.549 INFO:teuthology.orchestra.run.vm03.stdout:prometheus.vm03 vm03 *:9095 running (43s) 11s ago 65s 31.2M - 2.51.0 1d3b7f56885b 6da4090bac97 2026-03-10T12:15:41.618 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch ls' 2026-03-10T12:15:41.775 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:41.980 INFO:teuthology.orchestra.run.vm03.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-10T12:15:41.980 INFO:teuthology.orchestra.run.vm03.stdout:alertmanager ?:9093,9094 1/1 12s ago 95s count:1 2026-03-10T12:15:41.980 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter ?:9926 2/2 12s ago 96s * 2026-03-10T12:15:41.980 INFO:teuthology.orchestra.run.vm03.stdout:crash 2/2 12s ago 96s * 2026-03-10T12:15:41.980 INFO:teuthology.orchestra.run.vm03.stdout:grafana ?:3000 1/1 12s ago 95s count:1 2026-03-10T12:15:41.980 INFO:teuthology.orchestra.run.vm03.stdout:mgr 2/2 12s ago 96s count:2 2026-03-10T12:15:41.980 INFO:teuthology.orchestra.run.vm03.stdout:mon 2/2 12s ago 81s vm03:192.168.123.103=vm03;vm09:192.168.123.109=vm09;count:2 2026-03-10T12:15:41.980 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter ?:9100 2/2 12s ago 95s * 2026-03-10T12:15:41.980 INFO:teuthology.orchestra.run.vm03.stdout:osd.all-available-devices 8 12s ago 43s * 2026-03-10T12:15:41.980 INFO:teuthology.orchestra.run.vm03.stdout:prometheus ?:9095 1/1 12s ago 96s count:1 2026-03-10T12:15:42.041 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch host ls' 2026-03-10T12:15:42.061 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:41 vm03 ceph-mon[47106]: from='client.14530 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:42.197 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 
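The `ceph orch ps` table above lists every daemon placed so far (mon, mgr, osd.0-7 and the monitoring stack) in the running state. A rough sketch of checking that from the table text; the helper is hypothetical, and a real check would more likely use `ceph orch ps --format json` than scrape columns.

def all_daemons_running(orch_ps_output: str) -> bool:
    # Skip the header row; every remaining non-empty row should carry a
    # "running (..s)" status for the smoke test to be considered healthy.
    rows = [line for line in orch_ps_output.splitlines()[1:] if line.strip()]
    return bool(rows) and all("running" in line for line in rows)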
2026-03-10T12:15:42.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:41 vm09 ceph-mon[55914]: from='client.14530 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:42.402 INFO:teuthology.orchestra.run.vm03.stdout:HOST ADDR LABELS STATUS 2026-03-10T12:15:42.402 INFO:teuthology.orchestra.run.vm03.stdout:vm03 192.168.123.103 2026-03-10T12:15:42.402 INFO:teuthology.orchestra.run.vm03.stdout:vm09 192.168.123.109 2026-03-10T12:15:42.402 INFO:teuthology.orchestra.run.vm03.stdout:2 hosts in cluster 2026-03-10T12:15:42.466 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch device ls' 2026-03-10T12:15:42.668 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:42.913 INFO:teuthology.orchestra.run.vm03.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-10T12:15:42.914 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 12s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T12:15:42.914 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdb hdd DWNBRSTVMM03001 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:42.914 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdc hdd DWNBRSTVMM03002 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:42.914 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdd hdd DWNBRSTVMM03003 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:42.914 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vde hdd DWNBRSTVMM03004 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:42.914 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 12s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T12:15:42.914 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vdb hdd DWNBRSTVMM09001 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:42.914 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vdc hdd DWNBRSTVMM09002 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:42.914 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vdd hdd DWNBRSTVMM09003 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:42.914 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vde hdd DWNBRSTVMM09004 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:42.976 INFO:teuthology.run_tasks:Running task vip... 2026-03-10T12:15:42.979 INFO:tasks.vip:Allocating static IPs for each host... 
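The vip task whose output follows derives a per-host static address and a virtual IP from the host's position inside its management subnet. The mapping visible in the logged values (192.168.123.103 at pos 102 becomes static 12.12.0.103 in vnet 12.12.0.0/22, with VIP 12.12.1.103) can be reproduced with the standard ipaddress module; the vnet choice and the one-/24 offset below are assumptions read off this log, not the task's actual code.

import ipaddress

SUBNET = ipaddress.ip_network("192.168.123.0/24")   # management subnet from the log
VNET = ipaddress.ip_network("12.12.0.0/22")         # test vnet from the log

def static_and_vip(peer_ip: str):
    # Position of the host within its subnet picks the same position in the
    # vnet; the single VIP logged for this run sits one /24 above host.a's
    # static address.
    pos = list(SUBNET.hosts()).index(ipaddress.ip_address(peer_ip))
    static = list(VNET.hosts())[pos]
    vip = static + 256
    return static, vip

print(static_and_vip("192.168.123.103"))      # (12.12.0.103, 12.12.1.103), pos 102
print(static_and_vip("192.168.123.109")[0])   # 12.12.0.109, pos 108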
2026-03-10T12:15:42.979 INFO:tasks.vip:peername 192.168.123.103 2026-03-10T12:15:42.979 INFO:tasks.vip:192.168.123.103 in 192.168.123.0/24, pos 102 2026-03-10T12:15:42.980 INFO:tasks.vip:vm03.local static 12.12.0.103, vnet 12.12.0.0/22 2026-03-10T12:15:42.980 INFO:tasks.vip:VIPs are [IPv4Address('12.12.1.103')] 2026-03-10T12:15:42.980 DEBUG:teuthology.orchestra.run.vm03:> sudo ip route ls 2026-03-10T12:15:43.001 INFO:teuthology.orchestra.run.vm03.stdout:default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.103 metric 100 2026-03-10T12:15:43.001 INFO:teuthology.orchestra.run.vm03.stdout:192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.103 metric 100 2026-03-10T12:15:43.002 INFO:tasks.vip:Configuring 12.12.0.103 on vm03.local iface eth0... 2026-03-10T12:15:43.003 DEBUG:teuthology.orchestra.run.vm03:> sudo ip addr add 12.12.0.103/22 dev eth0 2026-03-10T12:15:43.068 INFO:tasks.vip:peername 192.168.123.109 2026-03-10T12:15:43.069 INFO:tasks.vip:192.168.123.109 in 192.168.123.0/24, pos 108 2026-03-10T12:15:43.069 INFO:tasks.vip:vm09.local static 12.12.0.109, vnet 12.12.0.0/22 2026-03-10T12:15:43.069 DEBUG:teuthology.orchestra.run.vm09:> sudo ip route ls 2026-03-10T12:15:43.093 INFO:teuthology.orchestra.run.vm09.stdout:default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.109 metric 100 2026-03-10T12:15:43.094 INFO:teuthology.orchestra.run.vm09.stdout:192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.109 metric 100 2026-03-10T12:15:43.094 INFO:tasks.vip:Configuring 12.12.0.109 on vm09.local iface eth0... 2026-03-10T12:15:43.095 DEBUG:teuthology.orchestra.run.vm09:> sudo ip addr add 12.12.0.109/22 dev eth0 2026-03-10T12:15:43.161 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T12:15:43.163 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-10T12:15:43.163 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch device ls --refresh' 2026-03-10T12:15:43.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:42 vm09 ceph-mon[55914]: from='client.14538 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:43.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:42 vm09 ceph-mon[55914]: from='client.14542 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:43.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:42 vm09 ceph-mon[55914]: pgmap v45: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:43.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:42 vm09 ceph-mon[55914]: from='client.14546 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:43.335 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:43.358 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:42 vm03 ceph-mon[47106]: from='client.14538 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:43.358 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:42 vm03 ceph-mon[47106]: from='client.14542 -' entity='client.admin' cmd=[{"prefix": "orch ps", 
"target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:43.358 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:42 vm03 ceph-mon[47106]: pgmap v45: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:43.358 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:42 vm03 ceph-mon[47106]: from='client.14546 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 12s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdb hdd DWNBRSTVMM03001 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdc hdd DWNBRSTVMM03002 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdd hdd DWNBRSTVMM03003 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vde hdd DWNBRSTVMM03004 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 13s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vdb hdd DWNBRSTVMM09001 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vdc hdd DWNBRSTVMM09002 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vdd hdd DWNBRSTVMM09003 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:43.561 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vde hdd DWNBRSTVMM09004 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:15:43.638 INFO:teuthology.run_tasks:Running task cephadm.apply... 
2026-03-10T12:15:43.641 INFO:tasks.cephadm:Applying spec(s): placement: count: 4 host_pattern: '*' service_id: foo service_type: rgw spec: rgw_frontend_port: 8000 --- placement: count: 2 service_id: rgw.foo service_type: ingress spec: backend_service: rgw.foo frontend_port: 9000 monitor_port: 9001 virtual_ip: 12.12.1.103/22 2026-03-10T12:15:43.642 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch apply -i - 2026-03-10T12:15:43.850 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:44.099 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:43 vm09.local ceph-mon[55914]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:44.099 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:43 vm09.local ceph-mon[55914]: from='client.14554 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:44.099 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:43 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:15:44.144 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:43 vm03.local ceph-mon[47106]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:44.144 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:43 vm03.local ceph-mon[47106]: from='client.14554 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:44.144 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:43 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:15:44.144 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled rgw.foo update... 2026-03-10T12:15:44.145 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled ingress.rgw.foo update... 2026-03-10T12:15:44.230 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-10T12:15:44.232 INFO:tasks.cephadm:Waiting for ceph service rgw.foo to start (timeout 300)... 
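The cephadm.wait_for_service check that follows polls `ceph orch ls -f json` and compares each service's status.running against status.size; the "rgw.foo has 0/4" lines below come from exactly this kind of comparison. A sketch under those assumptions, with an illustrative helper name, timeout and interval:

import json
import subprocess
import time

def wait_for_service(name: str, timeout: int = 300, interval: int = 1) -> None:
    # Wait until the named service reports running == size (> 0) in the
    # orchestrator's JSON service listing, or give up after the timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.check_output([
            "sudo", "/home/ubuntu/cephtest/cephadm",
            "--image", "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
            "shell", "-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring",
            "--fsid", "7444ff0e-1c7a-11f1-9305-473e10361f26",
            "--", "ceph", "orch", "ls", "-f", "json",
        ])
        for svc in json.loads(out):
            if svc.get("service_name") == name:
                status = svc.get("status", {})
                if status.get("running", 0) == status.get("size", -1) > 0:
                    return
        time.sleep(interval)
    raise TimeoutError(f"service {name} never reached full size")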
2026-03-10T12:15:44.233 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:15:44.465 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:44.728 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:44.728 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:29.850395Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:29.850304Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:29.850337Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:29.850422Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:44.140216Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:29.850269Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:29.850214Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:29.850367Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": 
{"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:29.850477Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:29.850450Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:44.134628Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:44.130391Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:15:44.792 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:15:44.977 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:44 vm03.local ceph-mon[47106]: from='client.14558 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:44.977 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:44 vm03.local ceph-mon[47106]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:44.977 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:44 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:44.977 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:44 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:44.977 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:44 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:44 vm09.local ceph-mon[55914]: from='client.14558 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:44 vm09.local ceph-mon[55914]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:44 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:44 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:44 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:44 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:44 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:44 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:45.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:44 vm03.local 
ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:45.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:44 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:45.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:44 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:45.793 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:15:45.982 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:46.226 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:46.226 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:44.140216Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": 
"node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:44.134628Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:44.130391Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:15:46.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:45 vm03.local ceph-mon[47106]: from='client.14562 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:46.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:45 vm03.local ceph-mon[47106]: Saving service rgw.foo spec with placement count:4;* 2026-03-10T12:15:46.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:45 vm03.local ceph-mon[47106]: Saving service ingress.rgw.foo spec with placement count:2 2026-03-10T12:15:46.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:45 vm03.local ceph-mon[47106]: from='client.14566 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:46.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:45 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:45 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:45 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:45 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:45 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.233 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:45 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.329 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:15:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:45 vm09.local ceph-mon[55914]: from='client.14562 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:15:46.391 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:45 vm09.local ceph-mon[55914]: Saving service rgw.foo spec with placement count:4;* 2026-03-10T12:15:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:45 vm09.local ceph-mon[55914]: Saving service ingress.rgw.foo spec with placement count:2 2026-03-10T12:15:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:45 vm09.local ceph-mon[55914]: from='client.14566 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:45 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:45 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:45 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:45 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:45 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:45 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 vm03.local ceph-mon[47106]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.wcqnzb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 
vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.wcqnzb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:47.324 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:46 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:47.330 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.wcqnzb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.wcqnzb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:46 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:47.574 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:47.970 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:47.970 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:44.140216Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z 
service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:47.106978Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:44.130391Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:15:48.041 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:15:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:47 vm03.local ceph-mon[47106]: from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:47 vm03.local ceph-mon[47106]: Deploying daemon rgw.foo.vm09.wcqnzb on vm09 2026-03-10T12:15:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.pqsxbr", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T12:15:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.pqsxbr", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T12:15:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:48.255 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:47 vm09.local ceph-mon[55914]: from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:48.255 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:47 vm09.local ceph-mon[55914]: Deploying daemon rgw.foo.vm09.wcqnzb on vm09 2026-03-10T12:15:48.255 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:48.255 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:48.255 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:48.255 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.pqsxbr", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T12:15:48.255 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.pqsxbr", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T12:15:48.255 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:48.255 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:48.255 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.042 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: Deploying daemon rgw.foo.vm03.pqsxbr on vm03 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='client.24343 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.jddmdl", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.jddmdl", "caps": ["mon", "allow *", "mgr", "allow rw", 
"osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: osdmap e27: 8 total, 8 up, 8 in 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1210804710' entity='client.rgw.foo.vm03.pqsxbr' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='client.? 192.168.123.109:0/2890258282' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.yhnrdc", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.yhnrdc", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:49.354 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: Deploying daemon rgw.foo.vm03.pqsxbr on vm03 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 
613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='client.24343 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.jddmdl", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.jddmdl", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: osdmap e27: 8 total, 8 up, 8 in 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1210804710' entity='client.rgw.foo.vm03.pqsxbr' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='client.? 192.168.123.109:0/2890258282' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='client.? 
' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.yhnrdc", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm03.yhnrdc", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:15:49.637 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:49.637 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:44.140216Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": 
"2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:48.814285Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:44.130391Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:15:49.718 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: Deploying daemon rgw.foo.vm09.jddmdl on vm09 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: Deploying daemon rgw.foo.vm03.yhnrdc on vm03 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1210804710' entity='client.rgw.foo.vm03.pqsxbr' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: from='client.? 
' entity='client.rgw.foo.vm09.wcqnzb' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: osdmap e28: 8 total, 8 up, 8 in 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:50 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: Deploying daemon rgw.foo.vm09.jddmdl on vm09 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: Deploying daemon rgw.foo.vm03.yhnrdc on vm03 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1210804710' entity='client.rgw.foo.vm03.pqsxbr' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: from='client.? 
' entity='client.rgw.foo.vm09.wcqnzb' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: osdmap e28: 8 total, 8 up, 8 in 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:50 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:50.719 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:15:50.886 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:51.121 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:51.121 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:44.140216Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": 
"2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:15:51.211 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: from='client.14598 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: Saving service rgw.foo spec with placement count:4;* 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: Deploying daemon haproxy.rgw.foo.vm09.mpsxsc on vm09 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: pgmap v51: 33 pgs: 23 active+clean, 3 creating+peering, 7 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 2.6 KiB/s rd, 767 B/s wr, 4 op/s 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: osdmap e29: 8 total, 8 up, 8 in 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: from='client.? 192.168.123.109:0/890716867' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: from='client.? 192.168.123.109:0/18335673' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:51 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: from='client.14598 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: Saving service rgw.foo spec with placement count:4;* 2026-03-10T12:15:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: Deploying daemon haproxy.rgw.foo.vm09.mpsxsc on vm09 2026-03-10T12:15:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: pgmap v51: 33 pgs: 23 active+clean, 3 creating+peering, 7 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 2.6 KiB/s rd, 767 B/s wr, 4 op/s 2026-03-10T12:15:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: osdmap e29: 8 total, 8 up, 8 in 2026-03-10T12:15:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: from='client.? 192.168.123.109:0/890716867' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.109:0/18335673' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:51 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-10T12:15:52.212 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:15:52.394 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:52.412 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:52 vm03.local ceph-mon[47106]: from='client.14614 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:52.412 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:52 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-10T12:15:52.412 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:52 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-10T12:15:52.412 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:52 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-10T12:15:52.412 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:52 vm03.local ceph-mon[47106]: from='client.? 
' entity='client.rgw.foo.vm09.wcqnzb' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-10T12:15:52.412 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:52 vm03.local ceph-mon[47106]: osdmap e30: 8 total, 8 up, 8 in 2026-03-10T12:15:52.412 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:52 vm03.local ceph-mon[47106]: pgmap v54: 65 pgs: 35 active+clean, 9 creating+peering, 21 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 2.2 KiB/s wr, 13 op/s 2026-03-10T12:15:52.621 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:52.621 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:44.140216Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": 
"all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:15:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:52 vm09.local ceph-mon[55914]: from='client.14614 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:52 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-10T12:15:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:52 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-10T12:15:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:52 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-10T12:15:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:52 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-10T12:15:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:52 vm09.local ceph-mon[55914]: osdmap e30: 8 total, 8 up, 8 in 2026-03-10T12:15:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:52 vm09.local ceph-mon[55914]: pgmap v54: 65 pgs: 35 active+clean, 9 creating+peering, 21 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 2.2 KiB/s wr, 13 op/s 2026-03-10T12:15:52.687 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:15:53.275 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:53 vm09.local ceph-mon[55914]: osdmap e31: 8 total, 8 up, 8 in 2026-03-10T12:15:53.275 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:53 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.275 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:53 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.275 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:53 vm09.local ceph-mon[55914]: from='client.? 192.168.123.109:0/890716867' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.275 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:53 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.275 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:53 vm09.local ceph-mon[55914]: from='client.? 192.168.123.109:0/18335673' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.275 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:53 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.275 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:53 vm09.local ceph-mon[55914]: from='client.14618 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:53.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:53 vm03.local ceph-mon[47106]: osdmap e31: 8 total, 8 up, 8 in 2026-03-10T12:15:53.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:53 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:53 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:53 vm03.local ceph-mon[47106]: from='client.? 192.168.123.109:0/890716867' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:53 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:53 vm03.local ceph-mon[47106]: from='client.? 192.168.123.109:0/18335673' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:53 vm03.local ceph-mon[47106]: from='client.? 
' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-10T12:15:53.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:53 vm03.local ceph-mon[47106]: from='client.14618 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:53.688 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:15:53.879 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:54.111 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:54.111 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:53.644808Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": 
"2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:15:54.170 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='client.? 
' entity='client.rgw.foo.vm09.wcqnzb' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: osdmap e32: 8 total, 8 up, 8 in 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: Deploying daemon haproxy.rgw.foo.vm03.oeugxe on vm03 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: pgmap v57: 97 pgs: 52 active+clean, 6 creating+peering, 39 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s rd, 767 B/s wr, 4 op/s 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: osdmap e33: 8 total, 8 up, 8 in 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='client.? 192.168.123.109:0/890716867' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='client.? 192.168.123.109:0/18335673' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:54 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: osdmap e32: 8 total, 8 up, 8 in 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: Deploying daemon haproxy.rgw.foo.vm03.oeugxe on vm03 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: pgmap v57: 97 pgs: 52 active+clean, 6 creating+peering, 39 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s rd, 767 B/s wr, 4 op/s 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: osdmap e33: 8 total, 8 up, 8 in 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='client.? 192.168.123.109:0/890716867' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='client.? 192.168.123.109:0/18335673' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:54 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T12:15:55.171 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:15:55.393 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:55.657 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:55.657 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:53.644808Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was 
created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: osdmap e34: 8 total, 8 up, 8 in 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.? 192.168.123.109:0/890716867' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.109:0/18335673' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.708 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:55 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.732 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: osdmap e34: 8 total, 8 up, 8 in 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.109:0/890716867' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.? 192.168.123.109:0/18335673' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:55 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T12:15:56.733 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:15:56.836 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:56 vm03.local ceph-mon[47106]: from='client.14626 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:56.837 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:56 vm03.local ceph-mon[47106]: pgmap v60: 129 pgs: 99 active+clean, 13 creating+peering, 17 unknown; 450 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 511 B/s wr, 8 op/s 2026-03-10T12:15:56.837 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:56 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T12:15:56.837 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:56 vm03.local ceph-mon[47106]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T12:15:56.837 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:56 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T12:15:56.837 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:56 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T12:15:56.837 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:56 vm03.local ceph-mon[47106]: osdmap e35: 8 total, 8 up, 8 in 2026-03-10T12:15:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:56 vm09.local ceph-mon[55914]: from='client.14626 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:56 vm09.local ceph-mon[55914]: pgmap v60: 129 pgs: 99 active+clean, 13 creating+peering, 17 unknown; 450 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 511 B/s wr, 8 op/s 2026-03-10T12:15:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:56 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.jddmdl' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T12:15:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:56 vm09.local ceph-mon[55914]: from='client.? ' entity='client.rgw.foo.vm09.wcqnzb' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T12:15:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:56 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1722274217' entity='client.rgw.foo.vm03.pqsxbr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T12:15:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:56 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/3195179886' entity='client.rgw.foo.vm03.yhnrdc' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T12:15:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:56 vm09.local ceph-mon[55914]: osdmap e35: 8 total, 8 up, 8 in 2026-03-10T12:15:57.337 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:57.915 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:57.915 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:53.644808Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", 
"spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:15:58.000 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:15:59.001 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:15:59.171 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:15:59.203 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:58 vm03.local ceph-mon[47106]: pgmap v62: 129 pgs: 110 active+clean, 13 creating+peering, 6 unknown; 451 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 1.7 KiB/s wr, 21 op/s 2026-03-10T12:15:59.204 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:58 vm03.local ceph-mon[47106]: from='client.24385 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:59.204 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:58 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:59.204 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:58 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:59.204 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:58 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:59.204 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:58 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:58 vm09.local ceph-mon[55914]: pgmap v62: 129 pgs: 110 active+clean, 13 creating+peering, 6 unknown; 451 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 1.7 KiB/s wr, 21 op/s 2026-03-10T12:15:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:58 vm09.local ceph-mon[55914]: from='client.24385 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:15:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:58 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:58 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:59.391 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:58 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:58 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:15:59.404 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:15:59.405 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:58.117300Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": 
"2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:15:59.472 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:16:00.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:59 vm09.local ceph-mon[55914]: 12.12.1.103 is in 12.12.0.0/22 on vm09 interface eth0 2026-03-10T12:16:00.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:59 vm09.local ceph-mon[55914]: 12.12.1.103 is in 12.12.0.0/22 on vm03 interface eth0 2026-03-10T12:16:00.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:15:59 vm09.local ceph-mon[55914]: Deploying daemon keepalived.rgw.foo.vm09.ecnqjx on vm09 2026-03-10T12:16:00.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:59 vm03.local ceph-mon[47106]: 12.12.1.103 is in 12.12.0.0/22 on vm09 interface eth0 2026-03-10T12:16:00.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:59 vm03.local ceph-mon[47106]: 12.12.1.103 is in 12.12.0.0/22 on vm03 interface eth0 2026-03-10T12:16:00.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:15:59 vm03.local ceph-mon[47106]: Deploying daemon keepalived.rgw.foo.vm09.ecnqjx on vm09 2026-03-10T12:16:00.473 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:16:00.659 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:16:00.883 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:16:00.883 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": 
"2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:58.117300Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:16:00.953 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:16:01.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:00 vm03.local ceph-mon[47106]: from='client.24387 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:00 vm03.local ceph-mon[47106]: pgmap v63: 129 pgs: 129 active+clean; 453 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 242 KiB/s rd, 4.7 KiB/s wr, 431 op/s 2026-03-10T12:16:01.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:00 vm09.local ceph-mon[55914]: from='client.24387 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:01.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:00 
vm09.local ceph-mon[55914]: pgmap v63: 129 pgs: 129 active+clean; 453 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 242 KiB/s rd, 4.7 KiB/s wr, 431 op/s 2026-03-10T12:16:01.953 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:16:02.053 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:02 vm09.local ceph-mon[55914]: from='client.14638 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:02.141 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:16:02.258 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:02 vm03.local ceph-mon[47106]: from='client.14638 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:02.387 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:16:02.387 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:16:02.315147Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", 
"running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:16:02.437 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:16:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:03 vm09.local ceph-mon[55914]: pgmap v64: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 265 KiB/s rd, 4.4 KiB/s wr, 464 op/s 2026-03-10T12:16:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:03 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:03 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:03 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:03.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:03 vm03.local ceph-mon[47106]: pgmap v64: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 265 KiB/s rd, 4.4 KiB/s wr, 464 op/s 2026-03-10T12:16:03.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:03 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:03.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:03 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:03.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:03 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:03.438 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:16:03.627 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config 
/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:16:03.935 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:16:03.935 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:16:02.315147Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", 
"status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:16:04.003 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:16:04.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:04 vm09.local ceph-mon[55914]: 12.12.1.103 is in 12.12.0.0/22 on vm03 interface eth0 2026-03-10T12:16:04.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:04 vm09.local ceph-mon[55914]: 12.12.1.103 is in 12.12.0.0/22 on vm09 interface eth0 2026-03-10T12:16:04.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:04 vm09.local ceph-mon[55914]: Deploying daemon keepalived.rgw.foo.vm03.wvnyuc on vm03 2026-03-10T12:16:04.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:04 vm09.local ceph-mon[55914]: from='client.14642 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:04.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:04 vm03.local ceph-mon[47106]: 12.12.1.103 is in 12.12.0.0/22 on vm03 interface eth0 2026-03-10T12:16:04.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:04 vm03.local ceph-mon[47106]: 12.12.1.103 is in 12.12.0.0/22 on vm09 interface eth0 2026-03-10T12:16:04.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:04 vm03.local ceph-mon[47106]: Deploying daemon keepalived.rgw.foo.vm03.wvnyuc on vm03 2026-03-10T12:16:04.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:04 vm03.local ceph-mon[47106]: from='client.14642 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:05.004 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:16:05.344 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:16:05.387 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:05 vm03.local ceph-mon[47106]: pgmap v65: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 235 KiB/s rd, 3.7 KiB/s wr, 413 op/s 2026-03-10T12:16:05.387 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:05 vm03.local ceph-mon[47106]: from='client.14646 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:05.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:05 vm09.local ceph-mon[55914]: pgmap v65: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 235 KiB/s rd, 3.7 KiB/s wr, 413 op/s 2026-03-10T12:16:05.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:05 vm09.local ceph-mon[55914]: from='client.14646 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:05.594 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:16:05.594 
INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:16:02.315147Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": 
["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:16:05.646 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:16:06.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:06 vm09.local ceph-mon[55914]: from='client.14650 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:06.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:06 vm09.local ceph-mon[55914]: pgmap v66: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 203 KiB/s rd, 3.2 KiB/s wr, 356 op/s 2026-03-10T12:16:06.647 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:16:06.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:06 vm03.local ceph-mon[47106]: from='client.14650 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:06.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:06 vm03.local ceph-mon[47106]: pgmap v66: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 203 KiB/s rd, 3.2 KiB/s wr, 356 op/s 2026-03-10T12:16:06.872 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:16:07.149 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:16:07.149 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:15:45.037130Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:15:44.297752Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:15:44.297801Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:15:45.037157Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:16:06.567611Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": 
"2026-03-10T12:15:44.134857Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:15:44.297865Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:15:44.297895Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:15:44.297835Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:15:44.297923Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:15:45.037185Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T12:16:07.213 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T12:16:07.729 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:07.729 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:07.729 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:07.729 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:07.729 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:16:07.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:07.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:07 vm09.local ceph-mon[55914]: 
from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:07.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:07.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:07.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:16:08.214 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:16:08.439 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:16:08.690 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:16:08.690 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:16:08.192371Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:16:07.706835Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:16:07.706879Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:16:08.192397Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:16:06.567611Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "last_refresh": "2026-03-10T12:16:07.707170Z", "ports": [9000, 9001], "running": 4, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:16:07.706958Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": 
{"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:16:07.706987Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:16:07.706909Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:16:07.707014Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:16:08.192423Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "last_refresh": "2026-03-10T12:16:07.707117Z", "ports": [8000], "running": 4, "size": 4}}] 2026-03-10T12:16:08.770 INFO:tasks.cephadm:rgw.foo has 4/4 2026-03-10T12:16:08.770 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-10T12:16:08.772 INFO:tasks.cephadm:Waiting for ceph service ingress.rgw.foo to start (timeout 300)... 
2026-03-10T12:16:08.772 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph orch ls -f json 2026-03-10T12:16:08.989 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:16:09.013 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='client.14652 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:09.013 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.013 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.014 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: pgmap v67: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 175 KiB/s rd, 2.8 KiB/s wr, 306 op/s 2026-03-10T12:16:09.014 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.014 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.014 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:16:09.014 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:16:09.014 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.014 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.014 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.014 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:08 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='client.14652 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 
2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: pgmap v67: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 175 KiB/s rd, 2.8 KiB/s wr, 306 op/s 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:08 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:16:09.281 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-10T12:16:09.281 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T12:14:06.878705Z", "last_refresh": "2026-03-10T12:16:08.192371Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:14:44.103404Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T12:14:05.608075Z", "last_refresh": "2026-03-10T12:16:07.706835Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:44.833660Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T12:14:05.354514Z", "last_refresh": "2026-03-10T12:16:07.706879Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T12:14:06.302817Z", "last_refresh": "2026-03-10T12:16:08.192397Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:16:06.567611Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, 
"monitor_port": 9001, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-10T12:15:44.134857Z", "last_refresh": "2026-03-10T12:16:07.707170Z", "ports": [9000, 9001], "running": 4, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-10T12:14:48.350990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T12:14:05.120075Z", "last_refresh": "2026-03-10T12:16:07.706958Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:49.362646Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T12:14:20.003914Z", "last_refresh": "2026-03-10T12:16:07.706987Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:47.563896Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T12:14:06.591139Z", "last_refresh": "2026-03-10T12:16:07.706909Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T12:14:58.856798Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T12:14:58.853551Z", "last_refresh": "2026-03-10T12:16:07.707014Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T12:14:49.366927Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T12:14:05.866922Z", "last_refresh": "2026-03-10T12:16:08.192423Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T12:15:49.822420Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T12:15:49.815881Z", "last_refresh": "2026-03-10T12:16:07.707117Z", "ports": [8000], "running": 4, "size": 4}}] 2026-03-10T12:16:09.331 INFO:tasks.cephadm:ingress.rgw.foo has 4/4 2026-03-10T12:16:09.331 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T12:16:09.334 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'echo "Check while healthy..." 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> curl http://12.12.1.103:9000/ 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> # stop each rgw in turn 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> echo "Check with each rgw stopped in turn..." 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> for rgw in `ceph orch ps | grep ^rgw.foo. 
| awk '"'"'{print $1}'"'"'`; do 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> ceph orch daemon stop $rgw 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo '"'"'Waiting for $rgw to stop'"'"'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done" 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> timeout 300 bash -c "while ! curl http://12.12.1.103:9000/ ; do echo '"'"'Waiting for http://12.12.1.103:9000/ to be available'"'"'; sleep 1 ; done" 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> ceph orch daemon start $rgw 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo '"'"'Waiting for $rgw to start'"'"'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done" 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> done 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> # stop each haproxy in turn 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> echo "Check with each haproxy down in turn..." 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '"'"'{print $1}'"'"'`; do 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> ceph orch daemon stop $haproxy 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep stopped; do echo '"'"'Waiting for $haproxy to stop'"'"'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done" 2026-03-10T12:16:09.334 DEBUG:teuthology.orchestra.run.vm03:> timeout 300 bash -c "while ! curl http://12.12.1.103:9000/ ; do echo '"'"'Waiting for http://12.12.1.103:9000/ to be available'"'"'; sleep 1 ; done" 2026-03-10T12:16:09.335 DEBUG:teuthology.orchestra.run.vm03:> ceph orch daemon start $haproxy 2026-03-10T12:16:09.335 DEBUG:teuthology.orchestra.run.vm03:> timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo '"'"'Waiting for $haproxy to start'"'"'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done" 2026-03-10T12:16:09.335 DEBUG:teuthology.orchestra.run.vm03:> done 2026-03-10T12:16:09.335 DEBUG:teuthology.orchestra.run.vm03:> 2026-03-10T12:16:09.335 DEBUG:teuthology.orchestra.run.vm03:> timeout 300 bash -c "while ! curl http://12.12.1.103:9000/ ; do echo '"'"'Waiting for http://12.12.1.103:9000/ to be available'"'"'; sleep 1 ; done"' 2026-03-10T12:16:09.543 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:16:09.656 INFO:teuthology.orchestra.run.vm03.stdout:Check while healthy... 2026-03-10T12:16:09.663 INFO:teuthology.orchestra.run.vm03.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T12:16:09.665 INFO:teuthology.orchestra.run.vm03.stderr: Dload Upload Total Spent Left Speed 2026-03-10T12:16:09.668 INFO:teuthology.orchestra.run.vm03.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 37400 0 --:--:-- --:--:-- --:--:-- 37400 2026-03-10T12:16:09.669 INFO:teuthology.orchestra.run.vm03.stdout:anonymousCheck with each rgw stopped in turn... 
2026-03-10T12:16:09.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:09 vm03.local ceph-mon[47106]: Checking dashboard <-> RGW credentials 2026-03-10T12:16:09.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:09 vm03.local ceph-mon[47106]: Reconfiguring prometheus.vm03 (dependencies changed)... 2026-03-10T12:16:09.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:09 vm03.local ceph-mon[47106]: from='client.24421 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:09.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:09 vm03.local ceph-mon[47106]: Reconfiguring daemon prometheus.vm03 on vm03 2026-03-10T12:16:09.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T12:16:09.911 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:16:09.982 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:09 vm09.local ceph-mon[55914]: Checking dashboard <-> RGW credentials 2026-03-10T12:16:09.982 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:09 vm09.local ceph-mon[55914]: Reconfiguring prometheus.vm03 (dependencies changed)... 
2026-03-10T12:16:09.982 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:09 vm09.local ceph-mon[55914]: from='client.24421 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:09.982 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:09 vm09.local ceph-mon[55914]: Reconfiguring daemon prometheus.vm03 on vm03 2026-03-10T12:16:09.982 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.982 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.982 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:09.982 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T12:16:09.982 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:16:10.080 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled to stop rgw.foo.vm03.pqsxbr on host 'vm03' 2026-03-10T12:16:10.290 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:16:10.455 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:16:10.455 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (22s) 2s ago 22s 99.7M - 19.2.3-678-ge911bdeb 654f31e6858e e80ee5151af3 2026-03-10T12:16:10.455 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (20s) 2s ago 20s 99.0M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:16:10.455 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (21s) 2s ago 21s 99.4M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:16:10.455 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (23s) 2s ago 23s 99.4M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:16:10.671 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:16:10.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:10 vm09.local ceph-mon[55914]: from='client.14692 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:10.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:10 vm09.local ceph-mon[55914]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T12:16:10.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:10 vm09.local ceph-mon[55914]: from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:10.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:10 vm09.local ceph-mon[55914]: pgmap v68: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 168 KiB/s rd, 2.2 KiB/s wr, 292 op/s 2026-03-10T12:16:10.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:16:10.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:10.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:10.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:10 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3683791052' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:10 vm03.local ceph-mon[47106]: from='client.14692 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T12:16:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:10 vm03.local ceph-mon[47106]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T12:16:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:10 vm03.local ceph-mon[47106]: from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:10 vm03.local ceph-mon[47106]: pgmap v68: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 168 KiB/s rd, 2.2 KiB/s wr, 292 op/s 2026-03-10T12:16:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:16:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:10 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3683791052' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='client.14700 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm03.pqsxbr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: Schedule stop daemon rgw.foo.vm03.pqsxbr 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='client.14704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='client.14708 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:11 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='client.14700 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm03.pqsxbr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: Schedule stop daemon rgw.foo.vm03.pqsxbr 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='client.14704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='client.14708 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:12.160 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:12.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:11 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:16:13.123 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:12 vm03.local ceph-mon[47106]: Checking dashboard <-> RGW credentials 2026-03-10T12:16:13.123 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:12 vm03.local ceph-mon[47106]: pgmap v69: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 682 B/s wr, 86 op/s 2026-03-10T12:16:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:12 vm09.local ceph-mon[55914]: Checking dashboard <-> RGW credentials 2026-03-10T12:16:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:12 vm09.local ceph-mon[55914]: pgmap v69: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 682 B/s wr, 86 op/s 2026-03-10T12:16:15.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:14 vm09.local ceph-mon[55914]: pgmap v70: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 170 B/s wr, 2 op/s 2026-03-10T12:16:15.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:14 vm03.local ceph-mon[47106]: pgmap v70: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 170 B/s wr, 2 op/s 2026-03-10T12:16:15.845 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:16:15.998 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:16:15.998 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (28s) 4s ago 28s 100M - 19.2.3-678-ge911bdeb 654f31e6858e e80ee5151af3 2026-03-10T12:16:15.998 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (26s) 4s ago 26s 99.4M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:16:15.998 
INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (27s) 5s ago 27s 99.7M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:16:15.998 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (28s) 5s ago 28s 99.5M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:16:16.183 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:16:17.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:16 vm09.local ceph-mon[55914]: from='client.24459 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:17.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:16 vm09.local ceph-mon[55914]: pgmap v71: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 341 B/s wr, 3 op/s 2026-03-10T12:16:17.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:16 vm09.local ceph-mon[55914]: from='client.14744 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:17.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:16 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/819312860' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:17.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:16 vm03.local ceph-mon[47106]: from='client.24459 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:17.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:16 vm03.local ceph-mon[47106]: pgmap v71: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 341 B/s wr, 3 op/s 2026-03-10T12:16:17.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:16 vm03.local ceph-mon[47106]: from='client.14744 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:17.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:16 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/819312860' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:19.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:18 vm09.local ceph-mon[55914]: pgmap v72: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 3.6 KiB/s rd, 341 B/s wr, 3 op/s 2026-03-10T12:16:19.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:18 vm03.local ceph-mon[47106]: pgmap v72: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 3.6 KiB/s rd, 341 B/s wr, 3 op/s 2026-03-10T12:16:21.364 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:20 vm03.local ceph-mon[47106]: pgmap v73: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 3.6 KiB/s rd, 341 B/s wr, 3 op/s 2026-03-10T12:16:21.364 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:16:21.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:20 vm09.local ceph-mon[55914]: pgmap v73: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 3.6 KiB/s rd, 341 B/s wr, 3 op/s 2026-03-10T12:16:21.518 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:16:21.518 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (33s) 10s ago 33s 100M - 19.2.3-678-ge911bdeb 654f31e6858e e80ee5151af3 2026-03-10T12:16:21.518 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (31s) 10s ago 31s 99.4M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:16:21.518 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (32s) 10s ago 32s 99.7M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:16:21.518 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (34s) 10s ago 34s 99.5M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:16:21.706 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:16:22.348 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:21 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3408238132' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:22.354 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:21 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3408238132' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:23.257 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:22 vm03.local ceph-mon[47106]: from='client.14752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:23.257 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:22 vm03.local ceph-mon[47106]: from='client.14756 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:23.257 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:22 vm03.local ceph-mon[47106]: pgmap v74: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 1.9 KiB/s rd, 255 B/s wr, 2 op/s 2026-03-10T12:16:23.257 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:22 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:23.257 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:22 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:23.257 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:22 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:16:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:22 vm09.local ceph-mon[55914]: from='client.14752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:22 vm09.local ceph-mon[55914]: from='client.14756 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:22 vm09.local ceph-mon[55914]: pgmap v74: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 1.9 KiB/s rd, 255 B/s wr, 2 op/s 2026-03-10T12:16:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:22 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:22 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:22 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:16:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:24 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:24 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:24 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:24 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:24 vm09.local ceph-mon[55914]: from='mgr.14217 
192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:16:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:24 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:16:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:24 vm09.local ceph-mon[55914]: pgmap v75: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 175 B/s wr, 1 op/s 2026-03-10T12:16:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:24 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:24 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:16:24.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:24 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:24.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:24 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:24.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:24 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:24.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:24 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:24.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:24 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:16:24.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:24 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:16:24.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:24 vm03.local ceph-mon[47106]: pgmap v75: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 175 B/s wr, 1 op/s 2026-03-10T12:16:24.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:24 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:24.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:24 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:16:25.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:25 vm09.local ceph-mon[55914]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T12:16:25.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:16:25.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 
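Note: at this point the mons log "Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)", and the orch ps and ceph health detail output that follows shows rgw.foo.vm03.pqsxbr reported in an error state while the scheduled stop is still being applied, so the grep-for-"stopped" loop keeps polling. A check that prefers structured output over grepping the table could read ceph orch ps as JSON; a hedged sketch, with field names recalled from recent cephadm output and possibly differing by Ceph release:

import json

def daemon_states(run_in_shell):
    # One status string per daemon, taken from "ceph orch ps --format json".
    # Field names (daemon_type, daemon_id, status_desc) are assumptions here.
    daemons = json.loads(run_in_shell("ceph", "orch", "ps", "--format", "json"))
    return {
        f"{d.get('daemon_type')}.{d.get('daemon_id')}": d.get("status_desc", "")
        for d in daemons
    }

# Example: daemon_states(run_in_shell).get("rgw.foo.vm03.pqsxbr") would read
# something like "error" or "stopped" while the stop above is being applied.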
2026-03-10T12:16:25.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:25 vm03.local ceph-mon[47106]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-03-10T12:16:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj'
2026-03-10T12:16:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T12:16:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:26 vm09.local ceph-mon[55914]: pgmap v76: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 175 B/s wr, 1 op/s
2026-03-10T12:16:26.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:26 vm03.local ceph-mon[47106]: pgmap v76: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 175 B/s wr, 1 op/s
2026-03-10T12:16:26.882 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop
2026-03-10T12:16:27.030 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T12:16:27.030 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 3s ago 39s - -
2026-03-10T12:16:27.030 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (37s) 3s ago 37s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf
2026-03-10T12:16:27.030 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (38s) 3s ago 38s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6
2026-03-10T12:16:27.030 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (39s) 3s ago 39s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06
2026-03-10T12:16:27.224 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T12:16:27.224 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T12:16:27.224 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state
2026-03-10T12:16:27.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:27 vm09.local ceph-mon[55914]: from='client.14764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T12:16:27.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:27 vm09.local ceph-mon[55914]: from='client.14768 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T12:16:27.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:27 vm03.local ceph-mon[47106]: from='client.14764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T12:16:27.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:27 vm03.local ceph-mon[47106]: from='client.14768 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T12:16:28.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:28 vm09.local ceph-mon[55914]: from='client.?
192.168.123.103:0/2589653137' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:28.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:28 vm09.local ceph-mon[55914]: pgmap v77: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 0 op/s 2026-03-10T12:16:28.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:28 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2589653137' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:28.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:28 vm03.local ceph-mon[47106]: pgmap v77: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 0 op/s 2026-03-10T12:16:30.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:30 vm03.local ceph-mon[47106]: pgmap v78: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:16:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:30 vm09.local ceph-mon[55914]: pgmap v78: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:16:32.401 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:16:32.554 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:16:32.554 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 9s ago 44s - - 2026-03-10T12:16:32.554 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (42s) 9s ago 42s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:16:32.555 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (43s) 9s ago 43s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:16:32.555 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (45s) 9s ago 45s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:16:32.747 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:16:32.747 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:16:32.747 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:16:32.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:32 vm03.local ceph-mon[47106]: pgmap v79: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 526 B/s wr, 0 op/s 2026-03-10T12:16:33.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:32 vm09.local ceph-mon[55914]: pgmap v79: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 526 B/s wr, 0 op/s 2026-03-10T12:16:34.102 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:33 vm09.local ceph-mon[55914]: from='client.14776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:34.102 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:33 vm09.local ceph-mon[55914]: from='client.14780 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:34.102 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:33 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/3012035306' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:34.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:33 vm03.local ceph-mon[47106]: from='client.14776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:34.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:33 vm03.local ceph-mon[47106]: from='client.14780 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:34.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:33 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3012035306' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:34 vm09.local ceph-mon[55914]: pgmap v80: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 526 B/s wr, 0 op/s 2026-03-10T12:16:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:34 vm03.local ceph-mon[47106]: pgmap v80: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 263 B/s rd, 526 B/s wr, 0 op/s 2026-03-10T12:16:37.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:36 vm09.local ceph-mon[55914]: pgmap v81: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:16:37.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:36 vm03.local ceph-mon[47106]: pgmap v81: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:16:37.920 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:16:38.073 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:16:38.073 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 14s ago 50s - - 2026-03-10T12:16:38.073 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (48s) 14s ago 48s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:16:38.073 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (49s) 14s ago 49s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:16:38.073 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (51s) 14s ago 50s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:16:38.262 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:16:38.262 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:16:38.263 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:16:39.102 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:38 vm09.local ceph-mon[55914]: pgmap v82: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:16:39.102 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:38 vm09.local ceph-mon[55914]: from='client.14788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:39.102 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:38 vm09.local ceph-mon[55914]: 
from='client.? 192.168.123.103:0/1167292966' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:39.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:38 vm03.local ceph-mon[47106]: pgmap v82: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:16:39.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:38 vm03.local ceph-mon[47106]: from='client.14788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:39.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:38 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1167292966' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:40.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:39 vm09.local ceph-mon[55914]: from='client.14792 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:40.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:39 vm03.local ceph-mon[47106]: from='client.14792 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:40 vm09.local ceph-mon[55914]: pgmap v83: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:16:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.a", "id": [0, 4]}]: dispatch 2026-03-10T12:16:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "3.4", "id": [1, 5]}]: dispatch 2026-03-10T12:16:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.d", "id": [4, 2]}]: dispatch 2026-03-10T12:16:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.16", "id": [1, 2]}]: dispatch 2026-03-10T12:16:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:16:41.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:40 vm03.local ceph-mon[47106]: pgmap v83: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:16:41.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.a", "id": [0, 4]}]: dispatch 2026-03-10T12:16:41.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' 
entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "3.4", "id": [1, 5]}]: dispatch 2026-03-10T12:16:41.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.d", "id": [4, 2]}]: dispatch 2026-03-10T12:16:41.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.16", "id": [1, 2]}]: dispatch 2026-03-10T12:16:41.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:16:42.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:41 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.a", "id": [0, 4]}]': finished 2026-03-10T12:16:42.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:41 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "3.4", "id": [1, 5]}]': finished 2026-03-10T12:16:42.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:41 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.d", "id": [4, 2]}]': finished 2026-03-10T12:16:42.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:41 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.16", "id": [1, 2]}]': finished 2026-03-10T12:16:42.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:41 vm09.local ceph-mon[55914]: osdmap e36: 8 total, 8 up, 8 in 2026-03-10T12:16:42.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:41 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.a", "id": [0, 4]}]': finished 2026-03-10T12:16:42.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:41 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "3.4", "id": [1, 5]}]': finished 2026-03-10T12:16:42.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:41 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.d", "id": [4, 2]}]': finished 2026-03-10T12:16:42.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:41 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.16", "id": [1, 2]}]': finished 2026-03-10T12:16:42.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:41 vm03.local ceph-mon[47106]: osdmap e36: 8 total, 8 up, 8 in 2026-03-10T12:16:43.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:42 vm09.local ceph-mon[55914]: 
pgmap v85: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:16:43.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:42 vm09.local ceph-mon[55914]: osdmap e37: 8 total, 8 up, 8 in 2026-03-10T12:16:43.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:42 vm03.local ceph-mon[47106]: pgmap v85: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:16:43.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:42 vm03.local ceph-mon[47106]: osdmap e37: 8 total, 8 up, 8 in 2026-03-10T12:16:43.443 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:16:43.610 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:16:43.610 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 20s ago 55s - - 2026-03-10T12:16:43.610 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (53s) 20s ago 53s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:16:43.610 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (54s) 20s ago 54s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:16:43.610 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (56s) 20s ago 56s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:16:43.801 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:16:43.801 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:16:43.801 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:16:44.102 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:43 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1292873381' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:44.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:43 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1292873381' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:45.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:44 vm09.local ceph-mon[55914]: from='client.14800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:45.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:44 vm09.local ceph-mon[55914]: pgmap v87: 129 pgs: 1 peering, 128 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:16:45.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:44 vm09.local ceph-mon[55914]: from='client.24489 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:45.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:44 vm03.local ceph-mon[47106]: from='client.14800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:45.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:44 vm03.local ceph-mon[47106]: pgmap v87: 129 pgs: 1 peering, 128 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:16:45.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:44 vm03.local ceph-mon[47106]: from='client.24489 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:47.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:46 vm09.local ceph-mon[55914]: pgmap v88: 129 pgs: 1 peering, 128 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 44 B/s, 1 objects/s recovering 2026-03-10T12:16:47.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:46 vm03.local ceph-mon[47106]: pgmap v88: 129 pgs: 1 peering, 128 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 44 B/s, 1 objects/s recovering 2026-03-10T12:16:48.862 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:48 vm03.local ceph-mon[47106]: pgmap v89: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 44 B/s, 1 objects/s recovering 2026-03-10T12:16:48.983 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:16:49.133 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:16:49.133 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 25s ago 61s - - 2026-03-10T12:16:49.133 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (59s) 25s ago 59s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:16:49.133 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (60s) 25s ago 60s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:16:49.133 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (62s) 25s ago 62s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:16:49.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:48 vm09.local ceph-mon[55914]: pgmap v89: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 44 B/s, 1 objects/s recovering 2026-03-10T12:16:49.335 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:16:49.335 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 
2026-03-10T12:16:49.335 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:16:50.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:49 vm09.local ceph-mon[55914]: from='client.14812 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:50.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:49 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2770407030' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:50.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:49 vm03.local ceph-mon[47106]: from='client.14812 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:50.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:49 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2770407030' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:51.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:50 vm09.local ceph-mon[55914]: from='client.14816 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:51.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:50 vm09.local ceph-mon[55914]: pgmap v90: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 40 B/s, 1 objects/s recovering 2026-03-10T12:16:51.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:50 vm03.local ceph-mon[47106]: from='client.14816 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:51.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:50 vm03.local ceph-mon[47106]: pgmap v90: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 40 B/s, 1 objects/s recovering 2026-03-10T12:16:53.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:52 vm09.local ceph-mon[55914]: pgmap v91: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 35 B/s, 1 objects/s recovering 2026-03-10T12:16:53.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:52 vm03.local ceph-mon[47106]: pgmap v91: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 35 B/s, 1 objects/s recovering 2026-03-10T12:16:54.520 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:16:54.676 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:16:54.676 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 31s ago 66s - - 2026-03-10T12:16:54.676 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (64s) 31s ago 64s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:16:54.676 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (65s) 31s ago 65s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:16:54.676 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (67s) 31s ago 67s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:16:54.871 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:16:54.871 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed 
cephadm daemon(s) 2026-03-10T12:16:54.871 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:16:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:54 vm09.local ceph-mon[55914]: pgmap v92: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 30 B/s, 0 objects/s recovering 2026-03-10T12:16:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:54 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1205563401' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:55.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:54 vm03.local ceph-mon[47106]: pgmap v92: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 30 B/s, 0 objects/s recovering 2026-03-10T12:16:55.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:54 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1205563401' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:16:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:55 vm09.local ceph-mon[55914]: from='client.14824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:55 vm09.local ceph-mon[55914]: from='client.14828 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:16:56.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:55 vm03.local ceph-mon[47106]: from='client.14824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:56.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:55 vm03.local ceph-mon[47106]: from='client.14828 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:16:56.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:16:57.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:56 vm09.local ceph-mon[55914]: pgmap v93: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 29 B/s, 0 objects/s recovering 2026-03-10T12:16:57.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:56 vm03.local ceph-mon[47106]: pgmap v93: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 29 B/s, 0 objects/s recovering 2026-03-10T12:16:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:16:58 vm03.local ceph-mon[47106]: pgmap v94: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 0 B/s, 0 objects/s recovering 2026-03-10T12:16:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:16:58 vm09.local ceph-mon[55914]: pgmap v94: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 0 B/s, 0 objects/s recovering 2026-03-10T12:17:00.053 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:00.206 
INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:00.206 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 36s ago 72s - - 2026-03-10T12:17:00.206 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (70s) 36s ago 70s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:17:00.206 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (71s) 37s ago 71s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:00.206 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (73s) 37s ago 73s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:00.394 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:00.394 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:00.394 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:01.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:00 vm09.local ceph-mon[55914]: pgmap v95: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:01.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:00 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4060919403' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:00 vm03.local ceph-mon[47106]: pgmap v95: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:00 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/4060919403' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:01 vm09.local ceph-mon[55914]: from='client.14836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:01 vm09.local ceph-mon[55914]: from='client.24515 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:01 vm03.local ceph-mon[47106]: from='client.14836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:01 vm03.local ceph-mon[47106]: from='client.24515 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:02 vm09.local ceph-mon[55914]: pgmap v96: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:03.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:02 vm03.local ceph-mon[47106]: pgmap v96: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:05.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:04 vm09.local ceph-mon[55914]: pgmap v97: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:05.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:04 vm03.local ceph-mon[47106]: pgmap v97: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:05.575 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:05.727 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:05.727 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 42s ago 77s - - 2026-03-10T12:17:05.727 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (75s) 42s ago 75s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:17:05.727 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (76s) 42s ago 76s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:05.727 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (78s) 42s ago 78s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:05.920 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:05.920 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:05.920 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:05 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/669748442' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:06.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:05 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/669748442' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:07.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:06 vm09.local ceph-mon[55914]: from='client.14848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:07.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:06 vm09.local ceph-mon[55914]: pgmap v98: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:07.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:06 vm09.local ceph-mon[55914]: from='client.14852 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:07.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:06 vm03.local ceph-mon[47106]: from='client.14848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:07.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:06 vm03.local ceph-mon[47106]: pgmap v98: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:07.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:06 vm03.local ceph-mon[47106]: from='client.14852 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:09.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:08 vm03.local ceph-mon[47106]: pgmap v99: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:08 vm09.local ceph-mon[55914]: pgmap v99: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:17:10.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:17:11.104 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:11.277 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:11.277 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 47s ago 83s - - 2026-03-10T12:17:11.277 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (81s) 47s ago 81s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:17:11.277 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (82s) 48s ago 82s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:11.277 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (84s) 48s ago 84s 100M - 
19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:11.277 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:10 vm03.local ceph-mon[47106]: pgmap v100: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:11.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:10 vm09.local ceph-mon[55914]: pgmap v100: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:11.467 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:11.467 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:11.467 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:12.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:11 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/62960001' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:12.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:11 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/62960001' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:13.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:12 vm09.local ceph-mon[55914]: from='client.14860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:13.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:12 vm09.local ceph-mon[55914]: from='client.14864 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:13.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:12 vm09.local ceph-mon[55914]: pgmap v101: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:13.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:12 vm03.local ceph-mon[47106]: from='client.14860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:13.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:12 vm03.local ceph-mon[47106]: from='client.14864 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:13.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:12 vm03.local ceph-mon[47106]: pgmap v101: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:15.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:14 vm09.local ceph-mon[55914]: pgmap v102: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:15.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:14 vm03.local ceph-mon[47106]: pgmap v102: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:16.644 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:16.803 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:16.803 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 53s ago 88s - - 2026-03-10T12:17:16.803 
INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (87s) 53s ago 87s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:17:16.803 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (88s) 53s ago 88s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:16.803 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (89s) 53s ago 89s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:17.008 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:17.008 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:17.009 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:17.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:16 vm09.local ceph-mon[55914]: pgmap v103: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:17.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:16 vm03.local ceph-mon[47106]: pgmap v103: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:18.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:17 vm09.local ceph-mon[55914]: from='client.24541 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:18.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:17 vm09.local ceph-mon[55914]: from='client.24545 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:18.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:17 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/428567193' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:18.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:17 vm03.local ceph-mon[47106]: from='client.24541 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:18.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:17 vm03.local ceph-mon[47106]: from='client.24545 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:18.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:17 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/428567193' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:19.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:18 vm03.local ceph-mon[47106]: pgmap v104: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:19.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:18 vm09.local ceph-mon[55914]: pgmap v104: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:21.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:21 vm09.local ceph-mon[55914]: pgmap v105: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:21.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:21 vm03.local ceph-mon[47106]: pgmap v105: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:22.193 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:22.363 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:22.363 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 58s ago 94s - - 2026-03-10T12:17:22.363 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (92s) 58s ago 92s 100M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:17:22.363 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (93s) 59s ago 93s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:22.364 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (95s) 59s ago 95s 100M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:22.556 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:22.556 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:22.556 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:23 vm09.local ceph-mon[55914]: pgmap v106: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:23 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1321601464' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:23.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:23 vm03.local ceph-mon[47106]: pgmap v106: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:23.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:23 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1321601464' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:24.168 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:24 vm03.local ceph-mon[47106]: from='client.14884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:24.169 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:24 vm03.local ceph-mon[47106]: from='client.24555 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:24.169 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:24 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:17:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:24 vm09.local ceph-mon[55914]: from='client.14884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:24 vm09.local ceph-mon[55914]: from='client.24555 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:24 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:17:25.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:25 vm09.local ceph-mon[55914]: pgmap v107: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:25.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:17:25.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:17:25.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:17:25.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:25 vm03.local ceph-mon[47106]: pgmap v107: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:25.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:17:25.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:17:25.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:17:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:17:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:17:26.641 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:17:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:17:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:26 vm09.local ceph-mon[55914]: pgmap v108: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:26 vm09.local ceph-mon[55914]: pgmap v109: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:17:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:17:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:17:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:17:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:17:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:17:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:26 vm03.local ceph-mon[47106]: pgmap v108: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:26 vm03.local ceph-mon[47106]: pgmap v109: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:17:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:17:27.740 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:27.893 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:27.893 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 2s ago 99s - - 2026-03-10T12:17:27.893 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (98s) 2s ago 98s 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 
2026-03-10T12:17:27.893 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (99s) 3s ago 99s 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:27.893 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (100s) 3s ago 100s 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:28.086 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:28.086 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:28.087 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:28.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:28 vm09.local ceph-mon[55914]: pgmap v110: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:28.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:28 vm09.local ceph-mon[55914]: from='client.14896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:28.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:28 vm09.local ceph-mon[55914]: from='client.14900 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:28.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:28 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/297412648' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:28 vm03.local ceph-mon[47106]: pgmap v110: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:28 vm03.local ceph-mon[47106]: from='client.14896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:28 vm03.local ceph-mon[47106]: from='client.14900 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:28.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:28 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/297412648' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:30.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:30 vm09.local ceph-mon[55914]: pgmap v111: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:30.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:30 vm03.local ceph-mon[47106]: pgmap v111: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:32.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:32 vm09.local ceph-mon[55914]: pgmap v112: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 623 B/s wr, 0 op/s 2026-03-10T12:17:32.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:32 vm03.local ceph-mon[47106]: pgmap v112: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 623 B/s wr, 0 op/s 2026-03-10T12:17:33.266 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:33.418 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:33.418 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 8s ago 105s - - 2026-03-10T12:17:33.418 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (103s) 8s ago 103s 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:17:33.418 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (104s) 8s ago 104s 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:33.418 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (106s) 8s ago 106s 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:33.606 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:33.606 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:33.606 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:34.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:34 vm09.local ceph-mon[55914]: from='client.14908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:34.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:34 vm09.local ceph-mon[55914]: from='client.14912 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:34.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:34 vm09.local ceph-mon[55914]: pgmap v113: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 623 B/s wr, 0 op/s 2026-03-10T12:17:34.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:34 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/3174403487' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:34.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:34 vm03.local ceph-mon[47106]: from='client.14908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:34.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:34 vm03.local ceph-mon[47106]: from='client.14912 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:34.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:34 vm03.local ceph-mon[47106]: pgmap v113: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 623 B/s wr, 0 op/s 2026-03-10T12:17:34.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:34 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3174403487' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:36.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:36 vm09.local ceph-mon[55914]: pgmap v114: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 614 B/s wr, 0 op/s 2026-03-10T12:17:36.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:36 vm03.local ceph-mon[47106]: pgmap v114: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 614 B/s wr, 0 op/s 2026-03-10T12:17:38.792 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:38 vm03.local ceph-mon[47106]: pgmap v115: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:38.792 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:38.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:38 vm09.local ceph-mon[55914]: pgmap v115: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:38.946 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:38.946 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 13s ago 110s - - 2026-03-10T12:17:38.946 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (109s) 13s ago 109s 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:17:38.946 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (110s) 14s ago 110s 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:38.946 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (111s) 14s ago 111s 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:39.161 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:39.161 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:39.161 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:39.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:39 vm09.local ceph-mon[55914]: from='client.14920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:39.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:39 vm09.local 
ceph-mon[55914]: from='client.24585 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:39.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:39 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1755719796' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:39.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:39 vm03.local ceph-mon[47106]: from='client.14920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:39.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:39 vm03.local ceph-mon[47106]: from='client.24585 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:39.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:39 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1755719796' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:40 vm09.local ceph-mon[55914]: pgmap v116: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:17:40.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:40 vm03.local ceph-mon[47106]: pgmap v116: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:40.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:17:42.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:42 vm09.local ceph-mon[55914]: pgmap v117: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:42.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:42 vm03.local ceph-mon[47106]: pgmap v117: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:17:44.350 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:44.503 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:44.504 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 19s ago 116s - - 2026-03-10T12:17:44.504 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (114s) 19s ago 114s 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:17:44.504 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (115s) 19s ago 115s 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:44.504 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (117s) 19s ago 117s 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:44.695 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:44.695 
INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:44.695 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:44.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:44 vm09.local ceph-mon[55914]: pgmap v118: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:44.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:44 vm03.local ceph-mon[47106]: pgmap v118: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:45.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:45 vm09.local ceph-mon[55914]: from='client.14932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:45.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:45 vm09.local ceph-mon[55914]: from='client.14936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:45.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:45 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3721134232' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:45.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:45 vm03.local ceph-mon[47106]: from='client.14932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:45.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:45 vm03.local ceph-mon[47106]: from='client.14936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:45.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:45 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3721134232' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:46.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:46 vm09.local ceph-mon[55914]: pgmap v119: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:46.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:46 vm03.local ceph-mon[47106]: pgmap v119: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:48.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:48 vm09.local ceph-mon[55914]: pgmap v120: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:48 vm03.local ceph-mon[47106]: pgmap v120: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:49.879 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:50.038 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:50.038 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 24s ago 2m - - 2026-03-10T12:17:50.038 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 24s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:17:50.038 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 25s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:50.038 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 25s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:50.235 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:50.235 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:50.235 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:50.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:50 vm09.local ceph-mon[55914]: pgmap v121: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:50.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:50 vm09.local ceph-mon[55914]: from='client.14944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:50.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:50 vm09.local ceph-mon[55914]: from='client.14948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:50.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:50 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/2739380856' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:50.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:50 vm03.local ceph-mon[47106]: pgmap v121: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:50.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:50 vm03.local ceph-mon[47106]: from='client.14944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:50.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:50 vm03.local ceph-mon[47106]: from='client.14948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:50.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:50 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2739380856' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:52.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:52 vm09.local ceph-mon[55914]: pgmap v122: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:52.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:52 vm03.local ceph-mon[47106]: pgmap v122: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:54.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:54 vm09.local ceph-mon[55914]: pgmap v123: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:54 vm03.local ceph-mon[47106]: pgmap v123: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:55.425 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:17:55.582 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:17:55.583 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 30s ago 2m - - 2026-03-10T12:17:55.583 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 30s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:17:55.583 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 30s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:17:55.583 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 30s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:17:55.774 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:17:55.775 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:17:55.775 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:17:55.775 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:17:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 
2026-03-10T12:17:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:56 vm09.local ceph-mon[55914]: from='client.14956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:56 vm09.local ceph-mon[55914]: pgmap v124: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:56 vm09.local ceph-mon[55914]: from='client.14960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:56 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2258832800' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:56.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:56 vm03.local ceph-mon[47106]: from='client.14956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:56.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:56 vm03.local ceph-mon[47106]: pgmap v124: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:56.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:56 vm03.local ceph-mon[47106]: from='client.14960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:17:56.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:56 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2258832800' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:17:58.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:17:58 vm09.local ceph-mon[55914]: pgmap v125: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:17:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:17:58 vm03.local ceph-mon[47106]: pgmap v125: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:00 vm09.local ceph-mon[55914]: pgmap v126: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:00.913 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:00 vm03.local ceph-mon[47106]: pgmap v126: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:00.955 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:01.112 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:01.112 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 35s ago 2m - - 2026-03-10T12:18:01.112 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 35s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:01.112 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 36s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:01.112 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 36s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:18:01.308 
INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:01.308 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:01.308 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:18:01.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:01 vm09.local ceph-mon[55914]: from='client.14968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:01.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:01 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/116535689' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:01.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:01 vm03.local ceph-mon[47106]: from='client.14968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:01.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:01 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/116535689' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:02.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:02 vm09.local ceph-mon[55914]: from='client.14972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:02.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:02 vm09.local ceph-mon[55914]: pgmap v127: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:02.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:02 vm03.local ceph-mon[47106]: from='client.14972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:02.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:02 vm03.local ceph-mon[47106]: pgmap v127: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:04 vm09.local ceph-mon[55914]: pgmap v128: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:04.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:04 vm03.local ceph-mon[47106]: pgmap v128: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:06.478 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:06.631 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:06.631 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 41s ago 2m - - 2026-03-10T12:18:06.631 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 41s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:06.631 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 41s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:06.632 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 41s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 
2026-03-10T12:18:06.829 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:06 vm03.local ceph-mon[47106]: pgmap v129: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:06.830 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:06.830 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:06.830 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:18:07.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:06 vm09.local ceph-mon[55914]: pgmap v129: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:08.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:07 vm09.local ceph-mon[55914]: from='client.24623 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:08.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:07 vm09.local ceph-mon[55914]: from='client.14984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:08.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:07 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3239421570' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:08.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:07 vm03.local ceph-mon[47106]: from='client.24623 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:08.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:07 vm03.local ceph-mon[47106]: from='client.14984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:08.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:07 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3239421570' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:09.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:08 vm09.local ceph-mon[55914]: pgmap v130: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:08 vm03.local ceph-mon[47106]: pgmap v130: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:11.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:10 vm09.local ceph-mon[55914]: pgmap v131: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:11.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:18:11.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:10 vm03.local ceph-mon[47106]: pgmap v131: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:11.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:18:11.999 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:12.145 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:12.146 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 46s ago 2m - - 2026-03-10T12:18:12.146 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 46s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:12.146 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 47s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:12.146 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 47s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:18:12.345 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:12.345 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:12.345 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:18:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:12 vm09.local ceph-mon[55914]: pgmap v132: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:12 vm09.local ceph-mon[55914]: from='client.14992 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:12 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/284215652' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:13.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:12 vm03.local ceph-mon[47106]: pgmap v132: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:13.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:12 vm03.local ceph-mon[47106]: from='client.14992 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:13.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:12 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/284215652' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:14.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:13 vm09.local ceph-mon[55914]: from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:14.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:13 vm03.local ceph-mon[47106]: from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:15.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:14 vm09.local ceph-mon[55914]: pgmap v133: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:15.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:14 vm03.local ceph-mon[47106]: pgmap v133: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:17.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:16 vm09.local ceph-mon[55914]: pgmap v134: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:17.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:16 vm03.local ceph-mon[47106]: pgmap v134: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:17.519 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:17.666 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:17.666 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 52s ago 2m - - 2026-03-10T12:18:17.666 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 52s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:17.666 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 52s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:17.666 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 52s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:18:17.851 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:17.851 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:17.851 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:18:19.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:18 vm09.local ceph-mon[55914]: pgmap v135: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:19.111 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:18 vm09.local ceph-mon[55914]: from='client.15004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:19.111 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:18 vm09.local ceph-mon[55914]: from='client.15008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:19.111 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:18 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/184414905' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:19.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:18 vm03.local ceph-mon[47106]: pgmap v135: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:19.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:18 vm03.local ceph-mon[47106]: from='client.15004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:19.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:18 vm03.local ceph-mon[47106]: from='client.15008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:19.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:18 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/184414905' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:21.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:20 vm09.local ceph-mon[55914]: pgmap v136: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:21.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:20 vm03.local ceph-mon[47106]: pgmap v136: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:23.020 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:22 vm09.local ceph-mon[55914]: pgmap v137: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:23.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:22 vm03.local ceph-mon[47106]: pgmap v137: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:23.182 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:23.182 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 57s ago 2m - - 2026-03-10T12:18:23.182 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 57s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:23.182 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 58s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:23.182 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 58s ago 2m 103M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:18:23.370 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:23.370 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:23.370 
INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:18:24.111 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:23 vm09.local ceph-mon[55914]: from='client.15016 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:24.111 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:23 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1929790335' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:24.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:23 vm03.local ceph-mon[47106]: from='client.15016 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:24.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:23 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1929790335' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:24.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:24 vm03.local ceph-mon[47106]: from='client.15020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:24.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:24 vm03.local ceph-mon[47106]: pgmap v138: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:24 vm09.local ceph-mon[55914]: from='client.15020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:24 vm09.local ceph-mon[55914]: pgmap v138: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:26.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:18:26.044 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:18:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:18:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:18:26.912 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:26 vm03.local ceph-mon[47106]: pgmap v139: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:26.913 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:18:26.913 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:18:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:26 vm09.local ceph-mon[55914]: pgmap v139: 129 pgs: 
129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:18:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:18:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:28 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:18:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:28 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:18:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:28 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:18:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:28 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:18:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:28 vm09.local ceph-mon[55914]: pgmap v140: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:28 vm09.local ceph-mon[55914]: pgmap v141: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:28 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:18:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:28 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:18:28.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:28 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:18:28.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:28 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:18:28.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:28 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:18:28.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:28 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:18:28.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:28 vm03.local ceph-mon[47106]: pgmap v140: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:28.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:28 vm03.local ceph-mon[47106]: pgmap v141: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:28.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:28 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' 
entity='mgr.vm03.oxmxtj' 2026-03-10T12:18:28.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:28 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:18:28.578 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:28.742 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:28.742 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 1s ago 2m - - 2026-03-10T12:18:28.742 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 1s ago 2m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:28.743 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 2s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:28.743 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 2s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:18:28.955 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:28.955 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:28.955 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:18:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:29 vm09.local ceph-mon[55914]: from='client.15028 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:29 vm09.local ceph-mon[55914]: from='client.15032 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:29 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4090769932' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:29.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:29 vm03.local ceph-mon[47106]: from='client.15028 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:29.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:29 vm03.local ceph-mon[47106]: from='client.15032 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:29.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:29 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/4090769932' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:30 vm09.local ceph-mon[55914]: pgmap v142: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:30.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:30 vm03.local ceph-mon[47106]: pgmap v142: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:32 vm09.local ceph-mon[55914]: pgmap v143: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 622 B/s wr, 0 op/s 2026-03-10T12:18:32.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:32 vm03.local ceph-mon[47106]: pgmap v143: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 622 B/s wr, 0 op/s 2026-03-10T12:18:34.156 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:34.345 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:34.345 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 7s ago 2m - - 2026-03-10T12:18:34.345 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 7s ago 2m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:34.345 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 7s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:34.345 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 7s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:18:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:34 vm03.local ceph-mon[47106]: pgmap v144: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 622 B/s wr, 0 op/s 2026-03-10T12:18:34.586 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:34.586 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:34.586 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:18:34.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:34 vm09.local ceph-mon[55914]: pgmap v144: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 622 B/s wr, 0 op/s 2026-03-10T12:18:35.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:35 vm09.local ceph-mon[55914]: from='client.15040 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:35.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:35 vm09.local ceph-mon[55914]: from='client.24659 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:35.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:35 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/780439905' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:35.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:35 vm03.local ceph-mon[47106]: from='client.15040 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:35.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:35 vm03.local ceph-mon[47106]: from='client.24659 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:35.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:35 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/780439905' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:36.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:36 vm09.local ceph-mon[55914]: pgmap v145: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 622 B/s wr, 0 op/s 2026-03-10T12:18:36.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:36 vm03.local ceph-mon[47106]: pgmap v145: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 622 B/s wr, 0 op/s 2026-03-10T12:18:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:38 vm09.local ceph-mon[55914]: pgmap v146: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 614 B/s wr, 0 op/s 2026-03-10T12:18:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:38 vm03.local ceph-mon[47106]: pgmap v146: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 614 B/s wr, 0 op/s 2026-03-10T12:18:39.778 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:39.972 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:39.972 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 12s ago 2m - - 2026-03-10T12:18:39.972 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 12s ago 2m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:39.972 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 13s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:39.972 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 13s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:18:40.186 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:40.186 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:40.186 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:18:40.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:40 vm03.local ceph-mon[47106]: pgmap v147: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:40.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:40 vm03.local ceph-mon[47106]: from='client.15052 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:40.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:40 vm03.local ceph-mon[47106]: 
from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:18:40.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:40 vm03.local ceph-mon[47106]: from='client.15056 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:40.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:40 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/655393403' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:40 vm09.local ceph-mon[55914]: pgmap v147: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:40 vm09.local ceph-mon[55914]: from='client.15052 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:18:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:40 vm09.local ceph-mon[55914]: from='client.15056 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:40 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/655393403' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:42.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:42 vm03.local ceph-mon[47106]: pgmap v148: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:42.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:42 vm09.local ceph-mon[55914]: pgmap v148: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:18:44.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:44 vm03.local ceph-mon[47106]: pgmap v149: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:44.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:44 vm09.local ceph-mon[55914]: pgmap v149: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:45.381 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:45.554 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:45.554 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 18s ago 2m - - 2026-03-10T12:18:45.554 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 18s ago 2m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:45.554 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (2m) 19s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:45.554 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (2m) 19s ago 2m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 
2026-03-10T12:18:45.757 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:45.757 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:45.757 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:18:46.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:46 vm09.local ceph-mon[55914]: pgmap v150: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:46.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:46 vm09.local ceph-mon[55914]: from='client.15064 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:46.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:46 vm09.local ceph-mon[55914]: from='client.24677 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:46.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:46 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3935349819' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:46.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:46 vm03.local ceph-mon[47106]: pgmap v150: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:46.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:46 vm03.local ceph-mon[47106]: from='client.15064 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:46.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:46 vm03.local ceph-mon[47106]: from='client.24677 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:46.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:46 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3935349819' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:48.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:48 vm09.local ceph-mon[55914]: pgmap v151: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:48.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:48 vm03.local ceph-mon[47106]: pgmap v151: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:50.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:50 vm09.local ceph-mon[55914]: pgmap v152: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:50.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:50 vm03.local ceph-mon[47106]: pgmap v152: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:50.955 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:51.134 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:51.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 24s ago 3m - - 2026-03-10T12:18:51.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 24s ago 3m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:51.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 24s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:51.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 24s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:18:51.378 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:51.378 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:51.378 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:18:51.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:51 vm03.local ceph-mon[47106]: from='client.15076 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:51.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:51 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1653713767' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:51.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:51 vm09.local ceph-mon[55914]: from='client.15076 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:51.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:51 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1653713767' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:52.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:52 vm09.local ceph-mon[55914]: from='client.15080 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:52.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:52 vm09.local ceph-mon[55914]: pgmap v153: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:52.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:52 vm03.local ceph-mon[47106]: from='client.15080 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:52.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:52 vm03.local ceph-mon[47106]: pgmap v153: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:54.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:54 vm09.local ceph-mon[55914]: pgmap v154: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:54 vm03.local ceph-mon[47106]: pgmap v154: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:18:55.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:18:56.574 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:18:56.751 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:18:56.751 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 29s ago 3m - - 2026-03-10T12:18:56.751 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 29s ago 3m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:18:56.751 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 30s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:18:56.751 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 30s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:18:56.751 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:56 vm03.local ceph-mon[47106]: pgmap v155: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:56 vm09.local ceph-mon[55914]: pgmap v155: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:56.962 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:18:56.962 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:18:56.962 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 
2026-03-10T12:18:57.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:57 vm09.local ceph-mon[55914]: from='client.15088 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:57.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:57 vm09.local ceph-mon[55914]: from='client.15092 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:57.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:57 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2237993273' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:57.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:57 vm03.local ceph-mon[47106]: from='client.15088 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:57.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:57 vm03.local ceph-mon[47106]: from='client.15092 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:18:57.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:57 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2237993273' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:18:58.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:18:58 vm09.local ceph-mon[55914]: pgmap v156: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:18:58.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:18:58 vm03.local ceph-mon[47106]: pgmap v156: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:00 vm09.local ceph-mon[55914]: pgmap v157: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:00.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:00 vm03.local ceph-mon[47106]: pgmap v157: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:02.156 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:02.321 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:02.321 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 35s ago 3m - - 2026-03-10T12:19:02.321 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 35s ago 3m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:02.321 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 35s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:02.321 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 35s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:19:02.536 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:02.536 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:02.536 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:02.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:02 vm09.local ceph-mon[55914]: 
pgmap v158: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:02.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:02 vm03.local ceph-mon[47106]: pgmap v158: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:03.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:03 vm09.local ceph-mon[55914]: from='client.15100 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:03.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:03 vm09.local ceph-mon[55914]: from='client.15104 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:03.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:03 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3057966827' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:03.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:03 vm03.local ceph-mon[47106]: from='client.15100 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:03.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:03 vm03.local ceph-mon[47106]: from='client.15104 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:03.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:03 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3057966827' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:04 vm09.local ceph-mon[55914]: pgmap v159: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:04.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:04 vm03.local ceph-mon[47106]: pgmap v159: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:06.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:06 vm09.local ceph-mon[55914]: pgmap v160: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:06.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:06 vm03.local ceph-mon[47106]: pgmap v160: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:07.721 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:07.889 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:07.890 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 40s ago 3m - - 2026-03-10T12:19:07.890 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 40s ago 3m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:07.890 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 41s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:07.890 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 41s ago 3m 106M - 19.2.3-678-ge911bdeb 
654f31e6858e 49c1a8b29c06 2026-03-10T12:19:08.101 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:08.101 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:08.101 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:08.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:08 vm09.local ceph-mon[55914]: pgmap v161: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:08.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:08 vm09.local ceph-mon[55914]: from='client.15112 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:08.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:08 vm09.local ceph-mon[55914]: from='client.24705 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:08.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:08 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4261367938' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:08.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:08 vm03.local ceph-mon[47106]: pgmap v161: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:08.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:08 vm03.local ceph-mon[47106]: from='client.15112 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:08.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:08 vm03.local ceph-mon[47106]: from='client.24705 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:08.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:08 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/4261367938' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:10 vm09.local ceph-mon[55914]: pgmap v162: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:19:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:10 vm03.local ceph-mon[47106]: pgmap v162: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:19:12.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:12 vm09.local ceph-mon[55914]: pgmap v163: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:12.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:12 vm03.local ceph-mon[47106]: pgmap v163: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:13.309 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:13.487 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:13.487 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 46s ago 3m - - 2026-03-10T12:19:13.487 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 46s ago 3m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:13.487 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 46s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:13.487 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 46s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:19:13.693 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:13.693 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:13.693 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:14.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:14 vm09.local ceph-mon[55914]: from='client.15124 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:14.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:14 vm09.local ceph-mon[55914]: pgmap v164: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:14.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:14 vm09.local ceph-mon[55914]: from='client.15128 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:14.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:14 vm09.local ceph-mon[55914]: 
from='client.? 192.168.123.103:0/730061200' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:14.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:14 vm03.local ceph-mon[47106]: from='client.15124 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:14.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:14 vm03.local ceph-mon[47106]: pgmap v164: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:14.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:14 vm03.local ceph-mon[47106]: from='client.15128 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:14.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:14 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/730061200' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:16.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:16 vm09.local ceph-mon[55914]: pgmap v165: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:16.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:16 vm03.local ceph-mon[47106]: pgmap v165: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:18.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:18 vm09.local ceph-mon[55914]: pgmap v166: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:18.906 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:18 vm03.local ceph-mon[47106]: pgmap v166: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:18.907 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:19.075 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:19.075 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 52s ago 3m - - 2026-03-10T12:19:19.075 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 52s ago 3m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:19.075 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 52s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:19.075 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 52s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:19:19.291 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:19.291 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:19.291 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:19 vm09.local ceph-mon[55914]: from='client.15136 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:19 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/2767985134' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:19.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:19 vm03.local ceph-mon[47106]: from='client.15136 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:19.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:19 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2767985134' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:20.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:20 vm09.local ceph-mon[55914]: from='client.15140 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:20.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:20 vm09.local ceph-mon[55914]: pgmap v167: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:20.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:20 vm03.local ceph-mon[47106]: from='client.15140 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:20.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:20 vm03.local ceph-mon[47106]: pgmap v167: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:22.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:22 vm03.local ceph-mon[47106]: pgmap v168: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:22 vm09.local ceph-mon[55914]: pgmap v168: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:24.491 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:24.664 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:24.664 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 57s ago 3m - - 2026-03-10T12:19:24.664 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 57s ago 3m 111M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:24.664 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 58s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:24.664 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 58s ago 3m 106M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:19:24.864 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:24.864 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:24.864 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:24.865 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:24 vm03.local ceph-mon[47106]: pgmap v169: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:24 vm09.local ceph-mon[55914]: pgmap v169: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:26.141 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:25 vm09.local ceph-mon[55914]: from='client.15148 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:25 vm09.local ceph-mon[55914]: from='client.15152 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:25 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2417105074' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:19:26.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:25 vm03.local ceph-mon[47106]: from='client.15148 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:26.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:25 vm03.local ceph-mon[47106]: from='client.15152 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:26.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:25 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2417105074' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:26.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:19:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:26 vm09.local ceph-mon[55914]: pgmap v170: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:27.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:26 vm03.local ceph-mon[47106]: pgmap v170: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:27.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:27 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:19:27.955 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:27 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:19:28.943 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:28 vm03.local ceph-mon[47106]: pgmap v171: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:28.943 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:28 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:19:28.943 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:28 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:19:29.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:28 vm09.local ceph-mon[55914]: pgmap v171: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:29.119 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:28 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:19:29.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:28 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:19:30.057 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:30.218 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:30.218 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 1s ago 3m - - 2026-03-10T12:19:30.218 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 1s ago 3m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:30.218 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 1s ago 3m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:30.218 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 1s ago 3m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:19:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:19:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:19:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:19:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:19:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:19:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:19:30.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:19:30.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:19:30.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:19:30.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:19:30.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' 
entity='mgr.vm03.oxmxtj' 2026-03-10T12:19:30.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:19:30.428 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:30.428 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:30.428 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:31.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:31 vm09.local ceph-mon[55914]: pgmap v172: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:31.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:31 vm09.local ceph-mon[55914]: pgmap v173: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:31.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:31 vm09.local ceph-mon[55914]: from='client.15160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:31.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:31 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1434127751' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:31.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:31 vm03.local ceph-mon[47106]: pgmap v172: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:31.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:31 vm03.local ceph-mon[47106]: pgmap v173: 129 pgs: 129 active+clean; 454 KiB data, 227 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:31.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:31 vm03.local ceph-mon[47106]: from='client.15160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:31.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:31 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1434127751' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:32 vm09.local ceph-mon[55914]: from='client.15164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:32 vm03.local ceph-mon[47106]: from='client.15164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:33.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:33 vm09.local ceph-mon[55914]: pgmap v174: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 619 B/s wr, 0 op/s 2026-03-10T12:19:33.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:33 vm03.local ceph-mon[47106]: pgmap v174: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 619 B/s wr, 0 op/s 2026-03-10T12:19:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:35 vm09.local ceph-mon[55914]: pgmap v175: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 619 B/s wr, 0 op/s 2026-03-10T12:19:35.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:35 vm03.local ceph-mon[47106]: pgmap v175: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 619 B/s wr, 0 op/s 2026-03-10T12:19:35.638 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:35.802 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:35.802 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 6s ago 3m - - 2026-03-10T12:19:35.802 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 6s ago 3m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:35.802 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 7s ago 3m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:35.802 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 7s ago 3m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:19:36.002 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:36.002 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:36.002 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:36.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:36 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1826852899' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:36.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:36 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1826852899' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:37 vm09.local ceph-mon[55914]: pgmap v176: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 619 B/s wr, 0 op/s 2026-03-10T12:19:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:37 vm09.local ceph-mon[55914]: from='client.24743 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:37 vm09.local ceph-mon[55914]: from='client.15174 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:37.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:37 vm03.local ceph-mon[47106]: pgmap v176: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 619 B/s wr, 0 op/s 2026-03-10T12:19:37.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:37 vm03.local ceph-mon[47106]: from='client.24743 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:37.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:37 vm03.local ceph-mon[47106]: from='client.15174 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:39.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:39 vm09.local ceph-mon[55914]: pgmap v177: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 619 B/s wr, 0 op/s 2026-03-10T12:19:39.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:39 vm03.local ceph-mon[47106]: pgmap v177: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 309 B/s rd, 619 B/s wr, 0 op/s 2026-03-10T12:19:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:19:40.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:19:41.199 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:41.371 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:41.371 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 12s ago 3m - - 2026-03-10T12:19:41.371 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 12s ago 3m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:41.371 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 12s ago 3m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:41.371 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 12s ago 3m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:19:41.371 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:41 vm03.local ceph-mon[47106]: pgmap v178: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB 
avail; 307 B/s rd, 614 B/s wr, 0 op/s 2026-03-10T12:19:41.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:41 vm09.local ceph-mon[55914]: pgmap v178: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 614 B/s wr, 0 op/s 2026-03-10T12:19:41.575 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:41.575 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:41.575 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:42.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:42 vm09.local ceph-mon[55914]: from='client.15182 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:42.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:42 vm09.local ceph-mon[55914]: pgmap v179: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:42.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:42 vm09.local ceph-mon[55914]: from='client.15186 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:42.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:42 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1760813226' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:42 vm03.local ceph-mon[47106]: from='client.15182 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:42 vm03.local ceph-mon[47106]: pgmap v179: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:19:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:42 vm03.local ceph-mon[47106]: from='client.15186 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:42 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1760813226' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:44.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:44 vm09.local ceph-mon[55914]: pgmap v180: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:44.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:44 vm03.local ceph-mon[47106]: pgmap v180: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:46.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:46 vm09.local ceph-mon[55914]: pgmap v181: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:46.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:46 vm03.local ceph-mon[47106]: pgmap v181: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:46.774 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:46.952 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:46.952 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 17s ago 3m - - 2026-03-10T12:19:46.952 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 17s ago 3m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:46.952 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (3m) 18s ago 3m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:46.952 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (3m) 18s ago 3m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:19:47.170 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:47.171 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:47.171 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:47.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:47 vm09.local ceph-mon[55914]: from='client.15194 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:47.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:47 vm09.local ceph-mon[55914]: from='client.15198 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:47.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:47 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3507310697' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:47.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:47 vm03.local ceph-mon[47106]: from='client.15194 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:47.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:47 vm03.local ceph-mon[47106]: from='client.15198 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:47.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:47 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3507310697' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:48.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:48 vm09.local ceph-mon[55914]: pgmap v182: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:48.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:48 vm03.local ceph-mon[47106]: pgmap v182: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:50.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:50 vm09.local ceph-mon[55914]: pgmap v183: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:50.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:50 vm03.local ceph-mon[47106]: pgmap v183: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:52.366 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:52.536 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:52.536 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 23s ago 4m - - 2026-03-10T12:19:52.536 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 23s ago 4m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:52.536 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 23s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:52.536 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (4m) 23s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:19:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:52 vm09.local ceph-mon[55914]: pgmap v184: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:52.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:52 vm03.local ceph-mon[47106]: pgmap v184: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:52.758 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:52.758 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:52.758 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:53.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:53 vm09.local ceph-mon[55914]: from='client.15206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:53.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:53 vm09.local ceph-mon[55914]: from='client.15210 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:53.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:53 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/519168932' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:53.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:53 vm03.local ceph-mon[47106]: from='client.15206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:53.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:53 vm03.local ceph-mon[47106]: from='client.15210 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:53.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:53 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/519168932' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:54 vm09.local ceph-mon[55914]: pgmap v185: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:54.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:54 vm03.local ceph-mon[47106]: pgmap v185: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:55.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:19:55.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:19:56.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:56 vm09.local ceph-mon[55914]: pgmap v186: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:56.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:56 vm03.local ceph-mon[47106]: pgmap v186: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:57.963 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:19:58.134 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:19:58.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 29s ago 4m - - 2026-03-10T12:19:58.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 29s ago 4m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:19:58.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 29s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:19:58.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (4m) 29s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:19:58.347 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:19:58.347 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:19:58.347 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:19:58.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:58 vm09.local ceph-mon[55914]: pgmap v187: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 
2026-03-10T12:19:58.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:58 vm09.local ceph-mon[55914]: from='client.15218 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:58.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:58 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1335870718' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:58.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:58 vm03.local ceph-mon[47106]: pgmap v187: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:19:58.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:58 vm03.local ceph-mon[47106]: from='client.15218 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:58.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:58 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1335870718' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:19:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:19:59 vm09.local ceph-mon[55914]: from='client.15222 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:19:59.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:19:59 vm03.local ceph-mon[47106]: from='client.15222 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:00.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:00 vm09.local ceph-mon[55914]: pgmap v188: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:00.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:00 vm09.local ceph-mon[55914]: Health detail: HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:00.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:00 vm09.local ceph-mon[55914]: [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:00.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:00 vm09.local ceph-mon[55914]: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:00.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:00 vm03.local ceph-mon[47106]: pgmap v188: 129 pgs: 129 active+clean; 454 KiB data, 231 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:00.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:00 vm03.local ceph-mon[47106]: Health detail: HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:00.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:00 vm03.local ceph-mon[47106]: [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:00.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:00 vm03.local ceph-mon[47106]: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:02.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:02 vm09.local ceph-mon[55914]: pgmap v189: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:02.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:02 vm03.local ceph-mon[47106]: pgmap v189: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:03.564 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 
2026-03-10T12:20:03.732 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:03.732 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 34s ago 4m - - 2026-03-10T12:20:03.732 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 34s ago 4m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:03.732 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 35s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:03.732 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (4m) 35s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:03.942 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:03.943 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:03.943 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:04.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:04 vm03.local ceph-mon[47106]: pgmap v190: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:04.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:04 vm03.local ceph-mon[47106]: from='client.15230 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:04.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:04 vm03.local ceph-mon[47106]: from='client.15234 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:04.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:04 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3318335340' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:04 vm09.local ceph-mon[55914]: pgmap v190: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:04 vm09.local ceph-mon[55914]: from='client.15230 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:04 vm09.local ceph-mon[55914]: from='client.15234 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:04 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/3318335340' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:06.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:06 vm09.local ceph-mon[55914]: pgmap v191: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:06.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:06 vm03.local ceph-mon[47106]: pgmap v191: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:08.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:08 vm09.local ceph-mon[55914]: pgmap v192: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:08.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:08 vm03.local ceph-mon[47106]: pgmap v192: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:09.138 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:20:09.314 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:09.314 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 40s ago 4m - - 2026-03-10T12:20:09.314 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 40s ago 4m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:09.314 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 40s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:09.314 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (4m) 40s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:09.535 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:09.535 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:09.535 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:09.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:09 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4019503883' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:09.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:09 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/4019503883' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:10 vm09.local ceph-mon[55914]: from='client.15242 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:10 vm09.local ceph-mon[55914]: pgmap v193: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:10 vm09.local ceph-mon[55914]: from='client.15246 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:20:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:10 vm03.local ceph-mon[47106]: from='client.15242 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:10 vm03.local ceph-mon[47106]: pgmap v193: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:10 vm03.local ceph-mon[47106]: from='client.15246 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:10.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:20:12.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:12 vm03.local ceph-mon[47106]: pgmap v194: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:12 vm09.local ceph-mon[55914]: pgmap v194: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:14.735 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:20:14.917 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:14.918 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 45s ago 4m - - 2026-03-10T12:20:14.918 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 45s ago 4m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:14.918 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 46s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:14.918 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (4m) 46s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:14.918 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:14 vm03.local ceph-mon[47106]: pgmap v195: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB 
avail 2026-03-10T12:20:15.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:14 vm09.local ceph-mon[55914]: pgmap v195: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:15.159 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:15.159 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:15.159 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:15.988 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:15 vm03.local ceph-mon[47106]: from='client.15254 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:15.988 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:15 vm03.local ceph-mon[47106]: from='client.15258 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:15.988 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:15 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/999186057' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:16.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:15 vm09.local ceph-mon[55914]: from='client.15254 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:16.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:15 vm09.local ceph-mon[55914]: from='client.15258 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:16.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:15 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/999186057' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:17.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:16 vm09.local ceph-mon[55914]: pgmap v196: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:17.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:16 vm03.local ceph-mon[47106]: pgmap v196: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:19.125 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:18 vm09.local ceph-mon[55914]: pgmap v197: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:19.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:18 vm03.local ceph-mon[47106]: pgmap v197: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:20.359 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:20:20.539 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:20.540 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 51s ago 4m - - 2026-03-10T12:20:20.540 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 51s ago 4m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:20.540 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 51s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:20.540 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (4m) 51s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:20.755 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:20.755 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:20.755 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:21.051 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:20 vm03.local ceph-mon[47106]: pgmap v198: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:21.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:20 vm09.local ceph-mon[55914]: pgmap v198: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:22.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:21 vm09.local ceph-mon[55914]: from='client.15266 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:22.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:21 vm09.local ceph-mon[55914]: from='client.15270 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:22.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:21 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1913455830' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:22.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:21 vm03.local ceph-mon[47106]: from='client.15266 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:22.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:21 vm03.local ceph-mon[47106]: from='client.15270 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:22.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:21 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1913455830' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:22 vm09.local ceph-mon[55914]: pgmap v199: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:23.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:22 vm03.local ceph-mon[47106]: pgmap v199: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:24 vm09.local ceph-mon[55914]: pgmap v200: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:25.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:24 vm03.local ceph-mon[47106]: pgmap v200: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:25.961 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:20:26.111 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:20:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:20:26.149 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:26.149 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 57s ago 4m - - 2026-03-10T12:20:26.149 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 57s ago 4m 116M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:26.149 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 57s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:26.149 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (4m) 57s ago 4m 109M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:26.355 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:26.355 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:26.355 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:26 vm09.local ceph-mon[55914]: pgmap v201: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 
2026-03-10T12:20:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:26 vm09.local ceph-mon[55914]: from='client.15278 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:26 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/415878301' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:27.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:26 vm03.local ceph-mon[47106]: pgmap v201: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:27.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:26 vm03.local ceph-mon[47106]: from='client.15278 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:27.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:26 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/415878301' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:27 vm09.local ceph-mon[55914]: from='client.15282 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:28.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:27 vm03.local ceph-mon[47106]: from='client.15282 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:29.126 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:28 vm09.local ceph-mon[55914]: pgmap v202: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:29.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:28 vm03.local ceph-mon[47106]: pgmap v202: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:29.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:20:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:20:31.034 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:30 vm03.local ceph-mon[47106]: pgmap v203: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:31.034 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:30 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:20:31.034 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:30 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:20:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:30 vm09.local ceph-mon[55914]: pgmap v203: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:30 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:20:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:30 vm09.local ceph-mon[55914]: from='mgr.14217 
192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:20:31.564 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:20:31.743 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:31.743 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 0s ago 4m - - 2026-03-10T12:20:31.743 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 0s ago 4m 120M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:31.743 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 1s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:31.743 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (4m) 1s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:31.952 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:31.953 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:31.953 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:20:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:20:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: pgmap v204: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:20:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:20:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: pgmap v205: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 303 B/s rd, 607 B/s wr, 0 op/s 2026-03-10T12:20:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:20:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:20:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: from='client.15290 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: from='client.24827 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:32.391 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:32 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4230187300' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: pgmap v204: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: pgmap v205: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 303 B/s rd, 607 B/s wr, 0 op/s 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: from='client.15290 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: from='client.24827 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:32 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/4230187300' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:34.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:34 vm03.local ceph-mon[47106]: pgmap v206: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 303 B/s rd, 607 B/s wr, 0 op/s 2026-03-10T12:20:34.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:34 vm09.local ceph-mon[55914]: pgmap v206: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 303 B/s rd, 607 B/s wr, 0 op/s 2026-03-10T12:20:36.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:36 vm03.local ceph-mon[47106]: pgmap v207: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 303 B/s rd, 607 B/s wr, 0 op/s 2026-03-10T12:20:36.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:36 vm09.local ceph-mon[55914]: pgmap v207: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 303 B/s rd, 607 B/s wr, 0 op/s 2026-03-10T12:20:37.145 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:20:37.324 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:37.324 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 6s ago 4m - - 2026-03-10T12:20:37.324 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 6s ago 4m 120M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:37.324 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 6s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:37.324 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (4m) 6s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:37.547 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:37.547 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:37.547 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:38.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:38 vm09.local ceph-mon[55914]: from='client.15302 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:38.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:38 vm09.local ceph-mon[55914]: from='client.15306 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:38.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:38 vm09.local ceph-mon[55914]: pgmap v208: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 303 B/s rd, 607 B/s wr, 0 op/s 2026-03-10T12:20:38.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:38 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/3729960326' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:38.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:38 vm03.local ceph-mon[47106]: from='client.15302 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:38.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:38 vm03.local ceph-mon[47106]: from='client.15306 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:38.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:38 vm03.local ceph-mon[47106]: pgmap v208: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 303 B/s rd, 607 B/s wr, 0 op/s 2026-03-10T12:20:38.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:38 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3729960326' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:40 vm09.local ceph-mon[55914]: pgmap v209: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 303 B/s rd, 607 B/s wr, 0 op/s 2026-03-10T12:20:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:20:40.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:40 vm03.local ceph-mon[47106]: pgmap v209: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 303 B/s rd, 607 B/s wr, 0 op/s 2026-03-10T12:20:40.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:20:42.750 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:42 vm03.local ceph-mon[47106]: pgmap v210: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:42.750 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:20:42.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:42 vm09.local ceph-mon[55914]: pgmap v210: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:42.919 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:42.919 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 11s ago 4m - - 2026-03-10T12:20:42.919 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 11s ago 4m 120M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:42.920 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 12s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:42.920 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (4m) 12s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:43.137 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:43.137 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:43.137 
INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:43.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:43 vm09.local ceph-mon[55914]: from='client.15314 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:43.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:43 vm09.local ceph-mon[55914]: from='client.15318 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:43.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:43 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1975151797' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:43.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:43 vm03.local ceph-mon[47106]: from='client.15314 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:43.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:43 vm03.local ceph-mon[47106]: from='client.15318 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:43.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:43 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1975151797' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:44.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:44 vm09.local ceph-mon[55914]: pgmap v211: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:44.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:44 vm03.local ceph-mon[47106]: pgmap v211: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:46.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:46 vm03.local ceph-mon[47106]: pgmap v212: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:46.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:46 vm09.local ceph-mon[55914]: pgmap v212: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:48.324 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:20:48.500 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:48.500 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 17s ago 5m - - 2026-03-10T12:20:48.500 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 17s ago 4m 120M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:48.500 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (4m) 17s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:48.500 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 17s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:48.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:48 vm03.local ceph-mon[47106]: pgmap v213: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:48.710 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:48.710 
INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:48.710 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:48.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:48 vm09.local ceph-mon[55914]: pgmap v213: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:49.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:49 vm09.local ceph-mon[55914]: from='client.15326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:49.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:49 vm09.local ceph-mon[55914]: from='client.15330 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:49.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:49 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/808738274' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:49.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:49 vm03.local ceph-mon[47106]: from='client.15326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:49.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:49 vm03.local ceph-mon[47106]: from='client.15330 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:49.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:49 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/808738274' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:50.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:50 vm09.local ceph-mon[55914]: pgmap v214: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:50.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:50 vm03.local ceph-mon[47106]: pgmap v214: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:52.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:52 vm09.local ceph-mon[55914]: pgmap v215: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:52.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:52 vm03.local ceph-mon[47106]: pgmap v215: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:53.911 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:20:54.093 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:54.093 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 22s ago 5m - - 2026-03-10T12:20:54.093 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 22s ago 5m 120M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:54.093 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5m) 23s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:54.093 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 23s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:54.305 
INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:54.305 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:54.305 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:20:54.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:54 vm09.local ceph-mon[55914]: pgmap v216: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:54.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:54 vm09.local ceph-mon[55914]: from='client.15338 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:54.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:54 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/560300499' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:54 vm03.local ceph-mon[47106]: pgmap v216: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:54 vm03.local ceph-mon[47106]: from='client.15338 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:54 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/560300499' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:20:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:55 vm09.local ceph-mon[55914]: from='client.15342 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:20:55.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:55 vm03.local ceph-mon[47106]: from='client.15342 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:20:55.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:20:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:56 vm09.local ceph-mon[55914]: pgmap v217: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:56.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:56 vm03.local ceph-mon[47106]: pgmap v217: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:58.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:20:58 vm09.local ceph-mon[55914]: pgmap v218: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:20:58 vm03.local ceph-mon[47106]: pgmap v218: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:20:59.503 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:20:59.680 
INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:20:59.680 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 28s ago 5m - - 2026-03-10T12:20:59.680 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 28s ago 5m 120M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:20:59.680 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5m) 29s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:20:59.680 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 29s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:20:59.895 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:20:59.895 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:20:59.895 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:21:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:00 vm09.local ceph-mon[55914]: pgmap v219: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:21:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:00 vm09.local ceph-mon[55914]: from='client.15350 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:00 vm09.local ceph-mon[55914]: from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:00 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2972969876' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:00 vm03.local ceph-mon[47106]: pgmap v219: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:21:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:00 vm03.local ceph-mon[47106]: from='client.15350 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:00 vm03.local ceph-mon[47106]: from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:00 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/2972969876' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:03.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:02 vm09.local ceph-mon[55914]: pgmap v220: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:03.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:02 vm03.local ceph-mon[47106]: pgmap v220: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:05.092 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:04 vm03.local ceph-mon[47106]: pgmap v221: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:05.092 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to stop 2026-03-10T12:21:05.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:04 vm09.local ceph-mon[55914]: pgmap v221: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:05.273 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:21:05.273 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 34s ago 5m - - 2026-03-10T12:21:05.273 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 34s ago 5m 120M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:21:05.273 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5m) 34s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:21:05.273 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 34s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:21:05.495 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:21:05.495 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:21:05.495 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:21:06.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:05 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1755192023' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:06.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:05 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1755192023' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:06.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:06 vm03.local ceph-mon[47106]: from='client.15362 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:06.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:06 vm03.local ceph-mon[47106]: from='client.15366 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:06.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:06 vm03.local ceph-mon[47106]: pgmap v222: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:07.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:06 vm09.local ceph-mon[55914]: from='client.15362 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:07.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:06 vm09.local ceph-mon[55914]: from='client.15366 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:07.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:06 vm09.local ceph-mon[55914]: pgmap v222: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:09.131 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:08 vm09.local ceph-mon[55914]: pgmap v223: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:09.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:08 vm03.local ceph-mon[47106]: pgmap v223: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:10.098 INFO:teuthology.orchestra.run.vm03.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T12:21:10.098 INFO:teuthology.orchestra.run.vm03.stderr: Dload Upload Total Spent Left Speed 2026-03-10T12:21:10.100 INFO:teuthology.orchestra.run.vm03.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 62333 0 --:--:-- --:--:-- --:--:-- 62333 2026-03-10T12:21:10.280 INFO:teuthology.orchestra.run.vm03.stdout:anonymousScheduled to start rgw.foo.vm03.pqsxbr on host 'vm03' 2026-03-10T12:21:10.529 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.pqsxbr to start 2026-03-10T12:21:10.750 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:21:10.751 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 error 39s ago 5m - - 2026-03-10T12:21:10.751 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 39s ago 5m 120M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:21:10.751 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5m) 40s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:21:10.751 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 40s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:21:10.830 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:10 vm03.local ceph-mon[47106]: pgmap v224: 129 pgs: 129 active+clean; 454 KiB 
data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:10.831 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:21:10.831 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:10 vm09.local ceph-mon[55914]: pgmap v224: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:21:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:10.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:10.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:21:11.003 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:21:11.003 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:21:11.003 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.pqsxbr on vm03 is in error state 2026-03-10T12:21:11.153 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:11.153 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:21:11.838 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:11 vm03.local ceph-mon[47106]: from='client.15374 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm03.pqsxbr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:12.107 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:11 vm03.local ceph-mon[47106]: Schedule start daemon rgw.foo.vm03.pqsxbr 2026-03-10T12:21:12.107 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:11 vm03.local ceph-mon[47106]: from='client.15378 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:12.107 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:11 vm03.local ceph-mon[47106]: from='client.15382 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:12.107 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:11 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3450636254' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:12.107 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:11 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:12.107 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:11 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:11 vm09.local ceph-mon[55914]: from='client.15374 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm03.pqsxbr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:11 vm09.local ceph-mon[55914]: Schedule start daemon rgw.foo.vm03.pqsxbr 2026-03-10T12:21:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:11 vm09.local ceph-mon[55914]: from='client.15378 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:11 vm09.local ceph-mon[55914]: from='client.15382 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:11 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3450636254' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:11 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:12.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:11 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:13.102 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:12 vm03.local ceph-mon[47106]: pgmap v225: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:13.102 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:12 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:13.102 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:12 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:13.102 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:12 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:21:13.102 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:12 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:21:13.102 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:12 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:13.102 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:12 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:21:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 
12:21:12 vm09.local ceph-mon[55914]: pgmap v225: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:12 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:12 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:12 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:21:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:12 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:21:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:12 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:12 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:21:14.132 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:13 vm09.local ceph-mon[55914]: pgmap v226: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:21:14.132 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:13 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:14.132 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:13 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:14.132 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:13 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:21:14.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:13 vm03.local ceph-mon[47106]: pgmap v226: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:21:14.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:13 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:14.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:13 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:14.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:13 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:21:15.844 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: pgmap v227: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 6.4 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 
12:21:15 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: pgmap v228: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 43 KiB/s rd, 0 B/s wr, 66 op/s 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: Cluster is now healthy 2026-03-10T12:21:15.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:15 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: pgmap v227: 129 pgs: 129 active+clean; 454 KiB data, 235 MiB used, 160 GiB / 160 GiB avail; 6.4 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: pgmap v228: 129 pgs: 129 active+clean; 454 KiB 
data, 235 MiB used, 160 GiB / 160 GiB avail; 43 KiB/s rd, 0 B/s wr, 66 op/s 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: Cluster is now healthy 2026-03-10T12:21:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:15 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:21:16.316 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3s) 1s ago 5m 97.8M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:21:16.498 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled to stop rgw.foo.vm03.yhnrdc on host 'vm03' 2026-03-10T12:21:16.723 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:21:16.914 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:21:16.914 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4s) 1s ago 5m 97.8M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:21:16.914 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 1s ago 5m 123M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:21:16.914 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5m) 2s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:21:16.914 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 2s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:21:17.159 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:21:17.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:17 vm03.local ceph-mon[47106]: from='client.15402 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:17.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:17 vm03.local ceph-mon[47106]: from='client.15406 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm03.yhnrdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:17.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:17 vm03.local ceph-mon[47106]: Schedule stop daemon rgw.foo.vm03.yhnrdc 2026-03-10T12:21:17.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:17.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:17.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:21:17.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:17 vm03.local ceph-mon[47106]: from='client.15410 -' 
entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:17.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:17 vm03.local ceph-mon[47106]: from='client.24901 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:17.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:17 vm03.local ceph-mon[47106]: pgmap v229: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 63 KiB/s rd, 0 B/s wr, 97 op/s 2026-03-10T12:21:17.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:17 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2874750927' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:17.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:17 vm09.local ceph-mon[55914]: from='client.15402 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:17.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:17 vm09.local ceph-mon[55914]: from='client.15406 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm03.yhnrdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:17.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:17 vm09.local ceph-mon[55914]: Schedule stop daemon rgw.foo.vm03.yhnrdc 2026-03-10T12:21:17.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:17.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:17.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:21:17.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:17 vm09.local ceph-mon[55914]: from='client.15410 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:17.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:17 vm09.local ceph-mon[55914]: from='client.24901 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:17.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:17 vm09.local ceph-mon[55914]: pgmap v229: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 63 KiB/s rd, 0 B/s wr, 97 op/s 2026-03-10T12:21:17.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:17 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/2874750927' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:19.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:19.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:19.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:21:19.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:21:19.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:19.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:21:19.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:19 vm03.local ceph-mon[47106]: pgmap v230: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 103 op/s 2026-03-10T12:21:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:21:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:21:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:21:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:19 vm09.local ceph-mon[55914]: pgmap v230: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 103 op/s 2026-03-10T12:21:22.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:22 vm03.local ceph-mon[47106]: pgmap v231: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 214 B/s wr, 103 op/s 2026-03-10T12:21:22.378 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:21:22.391 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:22 vm09.local ceph-mon[55914]: pgmap v231: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 214 B/s wr, 103 op/s 2026-03-10T12:21:22.546 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:21:22.546 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9s) 4s ago 5m 98.7M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:21:22.546 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 4s ago 5m 123M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:21:22.546 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5m) 8s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:21:22.546 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 8s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:21:22.768 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:21:23.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:23 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1770015959' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:23 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1770015959' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:24 vm09.local ceph-mon[55914]: from='client.15420 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:24 vm09.local ceph-mon[55914]: from='client.15424 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:24 vm09.local ceph-mon[55914]: pgmap v232: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 190 B/s wr, 92 op/s 2026-03-10T12:21:24.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:24 vm03.local ceph-mon[47106]: from='client.15420 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:24.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:24 vm03.local ceph-mon[47106]: from='client.15424 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:24.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:24 vm03.local ceph-mon[47106]: pgmap v232: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 190 B/s wr, 92 op/s 2026-03-10T12:21:26.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:26 vm09.local ceph-mon[55914]: pgmap v233: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 190 B/s wr, 82 op/s 2026-03-10T12:21:26.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:26.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:26 vm09.local ceph-mon[55914]: from='mgr.14217 
192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:21:26.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:26 vm03.local ceph-mon[47106]: pgmap v233: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 190 B/s wr, 82 op/s 2026-03-10T12:21:26.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:26.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:21:27.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:27 vm09.local ceph-mon[55914]: pgmap v234: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 170 B/s wr, 29 op/s 2026-03-10T12:21:27.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:27 vm03.local ceph-mon[47106]: pgmap v234: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 170 B/s wr, 29 op/s 2026-03-10T12:21:27.964 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:21:28.125 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:21:28.125 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (15s) 9s ago 5m 98.7M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:21:28.125 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 9s ago 5m 123M - 19.2.3-678-ge911bdeb 654f31e6858e ef7a65c9e6cf 2026-03-10T12:21:28.125 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5m) 13s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:21:28.125 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 13s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:21:28.247 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:28 vm03.local ceph-mon[47106]: from='client.15432 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:28.329 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:21:28.630 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:28 vm09.local ceph-mon[55914]: from='client.15432 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:29 vm09.local ceph-mon[55914]: from='client.15436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:29 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/674151263' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:29 vm09.local ceph-mon[55914]: pgmap v235: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 2.8 KiB/s rd, 170 B/s wr, 4 op/s 2026-03-10T12:21:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:21:29.563 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:29 vm03.local ceph-mon[47106]: from='client.15436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:29.563 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:29 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/674151263' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:29.563 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:29.563 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:29.563 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:29 vm03.local ceph-mon[47106]: pgmap v235: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 2.8 KiB/s rd, 170 B/s wr, 4 op/s 2026-03-10T12:21:29.563 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:21:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:32 vm03.local ceph-mon[47106]: pgmap v236: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:21:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:21:32.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:32.410 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:21:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:32 vm09.local ceph-mon[55914]: pgmap v236: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T12:21:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:21:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:21:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:21:33.526 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:21:33.526 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:33 vm03.local ceph-mon[47106]: pgmap v237: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 400 B/s wr, 0 op/s 2026-03-10T12:21:33.526 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:33 vm03.local ceph-mon[47106]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T12:21:33.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:33 vm09.local ceph-mon[55914]: pgmap v237: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 400 B/s wr, 0 op/s 2026-03-10T12:21:33.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:33 vm09.local ceph-mon[55914]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T12:21:33.685 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:21:33.685 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (20s) 2s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:21:33.685 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2s ago 5m - - 2026-03-10T12:21:33.685 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5m) 19s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:21:33.685 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 19s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:21:33.888 
INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:21:33.888 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:21:33.888 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:21:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:34 vm09.local ceph-mon[55914]: pgmap v238: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 400 B/s wr, 0 op/s 2026-03-10T12:21:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:34 vm09.local ceph-mon[55914]: from='client.15444 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:34 vm09.local ceph-mon[55914]: from='client.15448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:34 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2998336604' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:34.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:34 vm03.local ceph-mon[47106]: pgmap v238: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 400 B/s wr, 0 op/s 2026-03-10T12:21:34.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:34 vm03.local ceph-mon[47106]: from='client.15444 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:34.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:34 vm03.local ceph-mon[47106]: from='client.15448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:34.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:34 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/2998336604' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:36.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:36 vm09.local ceph-mon[55914]: pgmap v239: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 400 B/s wr, 0 op/s 2026-03-10T12:21:36.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:36 vm03.local ceph-mon[47106]: pgmap v239: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 400 B/s wr, 0 op/s 2026-03-10T12:21:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:38 vm09.local ceph-mon[55914]: pgmap v240: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 400 B/s wr, 0 op/s 2026-03-10T12:21:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:38 vm03.local ceph-mon[47106]: pgmap v240: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 400 B/s wr, 0 op/s 2026-03-10T12:21:39.080 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:21:39.249 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:21:39.289 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (26s) 8s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:21:39.289 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 8s ago 5m - - 2026-03-10T12:21:39.289 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5m) 24s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:21:39.289 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 24s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:21:39.455 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:21:39.455 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:21:39.455 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:21:39.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:39 vm09.local ceph-mon[55914]: from='client.15456 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:39.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:39 vm03.local ceph-mon[47106]: from='client.15456 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:40 vm09.local ceph-mon[55914]: pgmap v241: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 400 B/s wr, 0 op/s 2026-03-10T12:21:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:40 vm09.local ceph-mon[55914]: from='client.15460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:40 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/3254086692' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:21:40.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:40 vm03.local ceph-mon[47106]: pgmap v241: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 400 B/s wr, 0 op/s 2026-03-10T12:21:40.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:40 vm03.local ceph-mon[47106]: from='client.15460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:40.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:40 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3254086692' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:40.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:21:40.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:21:42.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:42 vm09.local ceph-mon[55914]: pgmap v242: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:21:42.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:42 vm03.local ceph-mon[47106]: pgmap v242: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:21:44.647 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:21:44.807 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:44 vm03.local ceph-mon[47106]: pgmap v243: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:21:44.807 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:21:44.807 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (31s) 13s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:21:44.807 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 13s ago 5m - - 2026-03-10T12:21:44.807 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5m) 30s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:21:44.807 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5m) 30s ago 5m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:21:44.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:44 vm09.local ceph-mon[55914]: pgmap v243: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:21:45.012 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:21:45.012 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] 
CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:21:45.012 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:21:46.061 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:45 vm03.local ceph-mon[47106]: from='client.15468 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:46.061 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:45 vm03.local ceph-mon[47106]: from='client.15472 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:46.061 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:45 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2623565093' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:46.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:45 vm09.local ceph-mon[55914]: from='client.15468 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:46.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:45 vm09.local ceph-mon[55914]: from='client.15472 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:46.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:45 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2623565093' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:47.042 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:46 vm09.local ceph-mon[55914]: pgmap v244: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:47.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:46 vm03.local ceph-mon[47106]: pgmap v244: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:49.135 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:48 vm09.local ceph-mon[55914]: pgmap v245: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:49.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:48 vm03.local ceph-mon[47106]: pgmap v245: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:50.197 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:21:50.356 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:21:50.356 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (37s) 19s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:21:50.356 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 19s ago 6m - - 2026-03-10T12:21:50.356 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 35s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:21:50.356 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 35s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:21:50.557 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:21:50.557 
INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:21:50.557 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:21:50.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:50 vm03.local ceph-mon[47106]: pgmap v246: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:50.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:50 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3973215671' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:51.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:50 vm09.local ceph-mon[55914]: pgmap v246: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:51.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:50 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3973215671' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:52.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:51 vm09.local ceph-mon[55914]: from='client.15480 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:52.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:51 vm09.local ceph-mon[55914]: from='client.15484 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:52.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:51 vm03.local ceph-mon[47106]: from='client.15480 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:52.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:51 vm03.local ceph-mon[47106]: from='client.15484 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:53.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:52 vm09.local ceph-mon[55914]: pgmap v247: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:53.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:52 vm03.local ceph-mon[47106]: pgmap v247: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:54 vm09.local ceph-mon[55914]: pgmap v248: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:55.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:54 vm03.local ceph-mon[47106]: pgmap v248: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:55.744 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:21:55.920 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:21:55.920 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (43s) 24s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:21:55.920 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 24s ago 6m - - 
2026-03-10T12:21:55.920 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 41s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:21:55.920 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 41s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:21:56.118 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:21:56.118 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:21:56.118 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:21:56.118 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:21:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:21:57.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:56 vm09.local ceph-mon[55914]: pgmap v249: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:57.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:56 vm09.local ceph-mon[55914]: from='client.15492 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:57.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:56 vm09.local ceph-mon[55914]: from='client.15496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:57.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:56 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2416204535' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:57.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:56 vm03.local ceph-mon[47106]: pgmap v249: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:21:57.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:56 vm03.local ceph-mon[47106]: from='client.15492 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:57.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:56 vm03.local ceph-mon[47106]: from='client.15496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:21:57.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:56 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/2416204535' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:21:59.136 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:21:58 vm09.local ceph-mon[55914]: pgmap v250: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:21:59.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:21:58 vm03.local ceph-mon[47106]: pgmap v250: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:01.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:00 vm09.local ceph-mon[55914]: pgmap v251: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:01.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:00 vm03.local ceph-mon[47106]: pgmap v251: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:01.308 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:01.463 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:01.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (48s) 30s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:01.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 30s ago 6m - - 2026-03-10T12:22:01.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 47s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:22:01.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 47s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:01.668 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:01.668 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:22:01.668 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:02.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:01 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1603753160' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:02.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:01 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1603753160' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:03.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:02 vm09.local ceph-mon[55914]: pgmap v252: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:03.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:02 vm09.local ceph-mon[55914]: from='client.15504 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:03.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:02 vm09.local ceph-mon[55914]: from='client.15508 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:03.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:02 vm03.local ceph-mon[47106]: pgmap v252: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:03.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:02 vm03.local ceph-mon[47106]: from='client.15504 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:03.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:02 vm03.local ceph-mon[47106]: from='client.15508 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:05.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:04 vm09.local ceph-mon[55914]: pgmap v253: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:05.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:04 vm03.local ceph-mon[47106]: pgmap v253: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:06.859 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:07.020 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:07.020 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (54s) 35s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:07.020 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 35s ago 6m - - 2026-03-10T12:22:07.020 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 52s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:22:07.020 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 52s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:07.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:06 vm09.local ceph-mon[55914]: pgmap v254: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:06 vm03.local ceph-mon[47106]: pgmap v254: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:07.215 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:07.215 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm 
daemon(s) 2026-03-10T12:22:07.215 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:08.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:07 vm09.local ceph-mon[55914]: from='client.15516 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:08.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:07 vm09.local ceph-mon[55914]: from='client.15520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:08.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:07 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4172122668' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:08.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:07 vm03.local ceph-mon[47106]: from='client.15516 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:08.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:07 vm03.local ceph-mon[47106]: from='client.15520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:08.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:07 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/4172122668' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:09.137 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:08 vm09.local ceph-mon[55914]: pgmap v255: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:09.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:08 vm03.local ceph-mon[47106]: pgmap v255: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:10.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:22:10.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:22:11.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:11 vm09.local ceph-mon[55914]: pgmap v256: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:11.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:11 vm03.local ceph-mon[47106]: pgmap v256: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:12.404 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:12.575 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:12.575 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (59s) 41s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:12.575 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 41s ago 6m - - 2026-03-10T12:22:12.575 
INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 58s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:22:12.575 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 58s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:12.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:12 vm09.local ceph-mon[55914]: pgmap v257: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:12.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:12 vm03.local ceph-mon[47106]: pgmap v257: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:12.780 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:12.780 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:22:12.780 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:13.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:13 vm03.local ceph-mon[47106]: from='client.15528 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:13.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:13 vm03.local ceph-mon[47106]: from='client.15532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:13.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:13 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2729601834' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:14.137 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:13 vm09.local ceph-mon[55914]: from='client.15528 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:14.137 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:13 vm09.local ceph-mon[55914]: from='client.15532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:14.137 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:13 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/2729601834' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:14.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:14 vm03.local ceph-mon[47106]: pgmap v258: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:15.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:14 vm09.local ceph-mon[55914]: pgmap v258: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:17.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:16 vm09.local ceph-mon[55914]: pgmap v259: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:17.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:16 vm03.local ceph-mon[47106]: pgmap v259: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:17.966 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:18.118 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:18.118 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (65s) 46s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:18.118 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 46s ago 6m - - 2026-03-10T12:22:18.118 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 63s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:22:18.118 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 63s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:18.304 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:18.304 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:22:18.304 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:19.138 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:18 vm09.local ceph-mon[55914]: pgmap v260: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:19.138 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:18 vm09.local ceph-mon[55914]: from='client.15540 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:19.138 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:18 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3035756489' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:19.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:18 vm03.local ceph-mon[47106]: pgmap v260: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:19.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:18 vm03.local ceph-mon[47106]: from='client.15540 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:19.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:18 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3035756489' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:20.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:19 vm09.local ceph-mon[55914]: from='client.15544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:20.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:19 vm03.local ceph-mon[47106]: from='client.15544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:21.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:20 vm09.local ceph-mon[55914]: pgmap v261: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:21.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:20 vm03.local ceph-mon[47106]: pgmap v261: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:22.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:22 vm03.local ceph-mon[47106]: pgmap v262: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:22 vm09.local ceph-mon[55914]: pgmap v262: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:23.489 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:23.647 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:23.647 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (70s) 52s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:23.647 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 52s ago 6m - - 2026-03-10T12:22:23.647 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 69s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:22:23.647 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 69s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:23.843 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:23.843 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:22:23.843 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:24 vm09.local ceph-mon[55914]: pgmap v263: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:24 vm09.local ceph-mon[55914]: from='client.15552 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:24 vm09.local ceph-mon[55914]: from='client.15556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:24 vm09.local 
ceph-mon[55914]: from='client.? 192.168.123.103:0/2480827680' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:25.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:24 vm03.local ceph-mon[47106]: pgmap v263: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:25.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:24 vm03.local ceph-mon[47106]: from='client.15552 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:25.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:24 vm03.local ceph-mon[47106]: from='client.15556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:25.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:24 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2480827680' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:22:26.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:22:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:26 vm09.local ceph-mon[55914]: pgmap v264: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:27.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:26 vm03.local ceph-mon[47106]: pgmap v264: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:29.028 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:29.178 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:29.178 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (76s) 58s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:29.178 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 58s ago 6m - - 2026-03-10T12:22:29.178 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 74s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:22:29.178 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 74s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:29.371 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:28 vm03.local ceph-mon[47106]: pgmap v265: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:29.372 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:29.372 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:22:29.372 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 
12:22:29 vm09.local ceph-mon[55914]: pgmap v265: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:29 vm09.local ceph-mon[55914]: from='client.15564 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:29 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/995739098' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:30.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:29 vm03.local ceph-mon[47106]: from='client.15564 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:30.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:29 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/995739098' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:31.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:30 vm09.local ceph-mon[55914]: pgmap v266: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:31.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:30 vm09.local ceph-mon[55914]: from='client.15568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:31.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:30 vm03.local ceph-mon[47106]: pgmap v266: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:31.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:30 vm03.local ceph-mon[47106]: from='client.15568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:32.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:31 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:22:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:31 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:22:33.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:32 vm09.local ceph-mon[55914]: pgmap v267: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:33.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:22:33.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:32 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:22:33.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:32 vm03.local ceph-mon[47106]: pgmap v267: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:22:33.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:32 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:22:33.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:32 vm03.local ceph-mon[47106]: from='mgr.14217 
192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:22:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:22:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:22:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:34 vm09.local ceph-mon[55914]: pgmap v268: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s 2026-03-10T12:22:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:34 vm09.local ceph-mon[55914]: pgmap v269: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 207 B/s rd, 415 B/s wr, 0 op/s 2026-03-10T12:22:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:22:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:22:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:22:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:22:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:34 vm03.local ceph-mon[47106]: pgmap v268: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s 2026-03-10T12:22:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:34 vm03.local ceph-mon[47106]: pgmap v269: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 207 B/s rd, 415 B/s wr, 0 op/s 2026-03-10T12:22:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:22:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:22:34.546 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:34.696 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:34.696 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (81s) 1s ago 6m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:34.696 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 1s ago 6m - - 2026-03-10T12:22:34.696 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 80s ago 6m 112M - 19.2.3-678-ge911bdeb 
654f31e6858e 951819664aa6 2026-03-10T12:22:34.696 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 80s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:34.881 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:34.881 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:22:34.881 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:35 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2202275373' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:35 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2202275373' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:36.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:36 vm09.local ceph-mon[55914]: from='client.15576 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:36.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:36 vm09.local ceph-mon[55914]: from='client.15580 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:36.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:36 vm09.local ceph-mon[55914]: pgmap v270: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 207 B/s rd, 415 B/s wr, 0 op/s 2026-03-10T12:22:36.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:36 vm03.local ceph-mon[47106]: from='client.15576 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:36.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:36 vm03.local ceph-mon[47106]: from='client.15580 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:36.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:36 vm03.local ceph-mon[47106]: pgmap v270: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 207 B/s rd, 415 B/s wr, 0 op/s 2026-03-10T12:22:37.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:37 vm09.local ceph-mon[55914]: pgmap v271: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 207 B/s rd, 415 B/s wr, 0 op/s 2026-03-10T12:22:37.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:37 vm03.local ceph-mon[47106]: pgmap v271: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 207 B/s rd, 415 B/s wr, 0 op/s 2026-03-10T12:22:39.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:39 vm09.local ceph-mon[55914]: pgmap v272: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 207 B/s rd, 415 B/s wr, 0 op/s 2026-03-10T12:22:39.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:39 vm03.local ceph-mon[47106]: pgmap v272: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 207 B/s rd, 415 B/s wr, 0 op/s 2026-03-10T12:22:40.070 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:40.234 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE 
MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:40.234 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (87s) 7s ago 6m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:40.234 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 7s ago 6m - - 2026-03-10T12:22:40.234 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 85s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:22:40.234 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 85s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:22:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:40 vm09.local ceph-mon[55914]: from='client.15588 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:40.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:22:40.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:40 vm03.local ceph-mon[47106]: from='client.15588 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:40.432 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:40.432 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:22:40.432 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:41.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:41 vm03.local ceph-mon[47106]: from='client.15592 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:41.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:41 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1774432531' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:41.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:41 vm03.local ceph-mon[47106]: pgmap v273: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:41.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:41 vm09.local ceph-mon[55914]: from='client.15592 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:41.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:41 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1774432531' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:41.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:41 vm09.local ceph-mon[55914]: pgmap v273: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:43 vm09.local ceph-mon[55914]: pgmap v274: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:43.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:43 vm03.local ceph-mon[47106]: pgmap v274: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:45 vm09.local ceph-mon[55914]: pgmap v275: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:45.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:45 vm03.local ceph-mon[47106]: pgmap v275: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:45.604 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:45.755 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:45.755 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (92s) 12s ago 6m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:45.755 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 12s ago 6m - - 2026-03-10T12:22:45.755 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (6m) 91s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:22:45.755 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (6m) 91s ago 6m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:45.945 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:45.945 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:22:45.945 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:46 vm09.local ceph-mon[55914]: from='client.15600 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:46 vm09.local ceph-mon[55914]: from='client.15604 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:46 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/2959880247' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:46.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:46 vm03.local ceph-mon[47106]: from='client.15600 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:46.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:46 vm03.local ceph-mon[47106]: from='client.15604 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:46.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:46 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2959880247' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:47.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:47 vm03.local ceph-mon[47106]: pgmap v276: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:47.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:47 vm09.local ceph-mon[55914]: pgmap v276: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:49 vm09.local ceph-mon[55914]: pgmap v277: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:49.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:49 vm03.local ceph-mon[47106]: pgmap v277: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:51.146 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:51.316 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:51.317 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (98s) 18s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:51.317 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 18s ago 7m - - 2026-03-10T12:22:51.317 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 96s ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:22:51.317 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 96s ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:51 vm09.local ceph-mon[55914]: pgmap v278: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:51 vm03.local ceph-mon[47106]: pgmap v278: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:51.520 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:51.520 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:22:51.520 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:52.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:52 vm03.local ceph-mon[47106]: 
from='client.15612 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:52.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:52 vm03.local ceph-mon[47106]: from='client.15616 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:52.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:52 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/953381153' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:52 vm09.local ceph-mon[55914]: from='client.15612 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:52 vm09.local ceph-mon[55914]: from='client.15616 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:52.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:52 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/953381153' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:53.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:53 vm03.local ceph-mon[47106]: pgmap v279: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:53.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:53 vm09.local ceph-mon[55914]: pgmap v279: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:55.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:22:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:22:56.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:55 vm09.local ceph-mon[55914]: pgmap v280: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:56.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:55 vm03.local ceph-mon[47106]: pgmap v280: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:22:56.698 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:22:56.852 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:22:56.852 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (104s) 24s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:22:56.852 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 24s ago 7m - - 2026-03-10T12:22:56.852 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 102s ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:22:56.852 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 102s ago 7m 112M - 
19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:22:57.044 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:22:57.044 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:22:57.044 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:22:57.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:57 vm09.local ceph-mon[55914]: from='client.15624 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:57.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:57 vm09.local ceph-mon[55914]: from='client.15628 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:57.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:57 vm09.local ceph-mon[55914]: pgmap v281: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:57.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:57 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2525397638' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:57.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:57 vm03.local ceph-mon[47106]: from='client.15624 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:57.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:57 vm03.local ceph-mon[47106]: from='client.15628 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:22:57.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:57 vm03.local ceph-mon[47106]: pgmap v281: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:57.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:57 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/2525397638' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:22:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:22:59 vm09.local ceph-mon[55914]: pgmap v282: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:22:59.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:22:59 vm03.local ceph-mon[47106]: pgmap v282: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:01.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:01 vm09.local ceph-mon[55914]: pgmap v283: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:01 vm03.local ceph-mon[47106]: pgmap v283: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:02.239 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:02.399 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:23:02.399 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (109s) 29s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:02.399 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 29s ago 7m - - 2026-03-10T12:23:02.399 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 108s ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:02.399 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 108s ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:02.593 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:23:02.593 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:23:02.593 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:23:02.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:02 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/696654928' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:03.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:02 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/696654928' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:04.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:03 vm09.local ceph-mon[55914]: from='client.15636 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:04.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:03 vm09.local ceph-mon[55914]: from='client.15640 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:04.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:03 vm09.local ceph-mon[55914]: pgmap v284: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:04.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:03 vm03.local ceph-mon[47106]: from='client.15636 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:04.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:03 vm03.local ceph-mon[47106]: from='client.15640 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:04.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:03 vm03.local ceph-mon[47106]: pgmap v284: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:05.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:05 vm09.local ceph-mon[55914]: pgmap v285: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:05.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:05 vm03.local ceph-mon[47106]: pgmap v285: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:07.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:07 vm09.local ceph-mon[55914]: pgmap v286: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:07.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:07 vm03.local ceph-mon[47106]: pgmap v286: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:07.773 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:07.927 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:23:07.927 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (115s) 35s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:07.927 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 35s ago 7m - - 2026-03-10T12:23:07.927 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 113s ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:07.927 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 113s ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:08.122 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:23:08.122 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm 
daemon(s) 2026-03-10T12:23:08.122 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:23:08.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:08 vm03.local ceph-mon[47106]: from='client.15648 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:08.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:08 vm03.local ceph-mon[47106]: from='client.15652 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:08.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:08 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1673285468' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:08.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:08 vm09.local ceph-mon[55914]: from='client.15648 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:08.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:08 vm09.local ceph-mon[55914]: from='client.15652 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:08.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:08 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1673285468' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:09 vm03.local ceph-mon[47106]: pgmap v287: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:09.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:09 vm09.local ceph-mon[55914]: pgmap v287: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:10.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:23:10.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:23:11.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:11 vm03.local ceph-mon[47106]: pgmap v288: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:11.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:11 vm09.local ceph-mon[55914]: pgmap v288: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:13.336 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:13.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:13 vm09.local ceph-mon[55914]: pgmap v289: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:13.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:13 vm03.local ceph-mon[47106]: pgmap v289: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:13.511 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE 
MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:23:13.511 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 40s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:13.511 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 40s ago 7m - - 2026-03-10T12:23:13.511 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 119s ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:13.511 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 119s ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:13.728 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:23:13.728 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:23:13.728 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:23:14.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:14 vm03.local ceph-mon[47106]: from='client.15660 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:14.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:14 vm03.local ceph-mon[47106]: from='client.15664 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:14.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:14 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/4138589062' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:14.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:14 vm09.local ceph-mon[55914]: from='client.15660 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:14.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:14 vm09.local ceph-mon[55914]: from='client.15664 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:14.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:14 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/4138589062' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:15.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:15 vm03.local ceph-mon[47106]: pgmap v290: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:15.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:15 vm09.local ceph-mon[55914]: pgmap v290: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:17.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:17 vm09.local ceph-mon[55914]: pgmap v291: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:17.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:17 vm03.local ceph-mon[47106]: pgmap v291: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:18.913 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:19.069 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:23:19.069 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 46s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:19.069 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 46s ago 7m - - 2026-03-10T12:23:19.069 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:19.069 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:19.279 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:19 vm03.local ceph-mon[47106]: from='client.15672 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:19.279 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:19 vm03.local ceph-mon[47106]: pgmap v292: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:19.279 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:19 vm03.local ceph-mon[47106]: from='client.15676 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:19.280 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:23:19.280 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:23:19.280 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:23:19.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:19 vm09.local ceph-mon[55914]: from='client.15672 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:19.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:19 vm09.local ceph-mon[55914]: pgmap v292: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:19.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:19 vm09.local ceph-mon[55914]: 
from='client.15676 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:20.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:20 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1392106858' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:20.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:20 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1392106858' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:21.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:21 vm03.local ceph-mon[47106]: pgmap v293: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:21.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:21 vm09.local ceph-mon[55914]: pgmap v293: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:23 vm09.local ceph-mon[55914]: pgmap v294: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:23.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:23 vm03.local ceph-mon[47106]: pgmap v294: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:24.463 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:24.620 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:23:24.620 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 51s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:24.620 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 51s ago 7m - - 2026-03-10T12:23:24.620 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:24.620 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:24.813 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:23:24.813 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:23:24.813 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:23:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:24 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3356306357' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:25.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:24 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3356306357' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:25 vm09.local ceph-mon[55914]: from='client.15684 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:25 vm09.local ceph-mon[55914]: from='client.15688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:23:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:25 vm09.local ceph-mon[55914]: pgmap v295: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:26.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:25 vm03.local ceph-mon[47106]: from='client.15684 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:26.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:25 vm03.local ceph-mon[47106]: from='client.15688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:26.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:23:26.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:25 vm03.local ceph-mon[47106]: pgmap v295: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:27.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:27 vm03.local ceph-mon[47106]: pgmap v296: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:27.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:27 vm09.local ceph-mon[55914]: pgmap v296: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:29.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:29 vm03.local ceph-mon[47106]: pgmap v297: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:29.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:29 vm09.local ceph-mon[55914]: pgmap v297: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:30.002 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:30.163 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:23:30.163 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 57s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:30.163 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 57s ago 7m - - 2026-03-10T12:23:30.163 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:30.163 
INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:30.358 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:23:30.358 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:23:30.359 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:23:30.359 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:30 vm03.local ceph-mon[47106]: from='client.15696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:30.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:30 vm09.local ceph-mon[55914]: from='client.15696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:31.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:31 vm03.local ceph-mon[47106]: from='client.15700 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:31.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:31 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1927223461' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:31.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:31 vm03.local ceph-mon[47106]: pgmap v298: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:31.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:31 vm09.local ceph-mon[55914]: from='client.15700 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:31.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:31 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1927223461' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:31.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:31 vm09.local ceph-mon[55914]: pgmap v298: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:33.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:33 vm03.local ceph-mon[47106]: pgmap v299: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:33.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:33 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:23:33.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:33 vm09.local ceph-mon[55914]: pgmap v299: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:23:33.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:33 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:23:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:23:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:23:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:34 vm03.local ceph-mon[47106]: pgmap v300: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s 2026-03-10T12:23:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:23:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:23:34.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:23:34.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:23:34.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:34 vm09.local ceph-mon[55914]: pgmap v300: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s 2026-03-10T12:23:34.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:23:34.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd 
tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:23:35.539 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:35.689 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:23:35.689 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 62s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:35.689 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 62s ago 7m - - 2026-03-10T12:23:35.689 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:35.689 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:35.875 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:23:35.875 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:23:35.875 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:23:36.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:36 vm09.local ceph-mon[55914]: pgmap v301: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s 2026-03-10T12:23:36.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:36 vm09.local ceph-mon[55914]: from='client.15708 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:36.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:36 vm09.local ceph-mon[55914]: from='client.15712 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:36.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:36 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4144000924' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:36.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:36 vm03.local ceph-mon[47106]: pgmap v301: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s 2026-03-10T12:23:36.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:36 vm03.local ceph-mon[47106]: from='client.15708 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:36.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:36 vm03.local ceph-mon[47106]: from='client.15712 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:36.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:36 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/4144000924' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:38.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:38 vm09.local ceph-mon[55914]: pgmap v302: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s 2026-03-10T12:23:38.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:38 vm03.local ceph-mon[47106]: pgmap v302: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s 2026-03-10T12:23:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:40 vm09.local ceph-mon[55914]: pgmap v303: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s 2026-03-10T12:23:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:23:40.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:40 vm03.local ceph-mon[47106]: pgmap v303: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 198 B/s rd, 396 B/s wr, 0 op/s 2026-03-10T12:23:40.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:23:41.064 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:41.239 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:23:41.240 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 68s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:41.240 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 68s ago 7m - - 2026-03-10T12:23:41.240 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:41.240 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:41.441 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:23:41.441 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:23:41.441 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:23:41.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:41 vm09.local ceph-mon[55914]: from='client.15720 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:41.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:41 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1053435154' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:41.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:41 vm03.local ceph-mon[47106]: from='client.15720 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:41.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:41 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1053435154' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:42.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:42 vm09.local ceph-mon[55914]: from='client.15724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:42.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:42 vm09.local ceph-mon[55914]: pgmap v304: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:42.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:42 vm03.local ceph-mon[47106]: from='client.15724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:42.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:42 vm03.local ceph-mon[47106]: pgmap v304: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:44.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:44 vm09.local ceph-mon[55914]: pgmap v305: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:44.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:44 vm03.local ceph-mon[47106]: pgmap v305: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:46.635 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:46.801 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:23:46.801 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 74s ago 7m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:46.801 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 74s ago 7m - - 2026-03-10T12:23:46.801 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:46.801 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (7m) 2m ago 7m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:46.801 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:46 vm03.local ceph-mon[47106]: pgmap v306: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:46.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:46 vm09.local ceph-mon[55914]: pgmap v306: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:46.994 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:23:46.994 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:23:46.994 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error 
state 2026-03-10T12:23:47.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:47 vm09.local ceph-mon[55914]: from='client.25081 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:47.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:47 vm09.local ceph-mon[55914]: from='client.25085 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:47.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:47 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1593688451' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:47.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:47 vm03.local ceph-mon[47106]: from='client.25081 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:47.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:47 vm03.local ceph-mon[47106]: from='client.25085 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:47.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:47 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1593688451' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:48.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:48 vm09.local ceph-mon[55914]: pgmap v307: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:48.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:48 vm03.local ceph-mon[47106]: pgmap v307: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:50.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:50 vm09.local ceph-mon[55914]: pgmap v308: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:50.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:50 vm03.local ceph-mon[47106]: pgmap v308: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:52.186 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:52.346 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:23:52.346 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 79s ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:52.346 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 79s ago 8m - - 2026-03-10T12:23:52.346 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 2m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:52.346 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (8m) 2m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:52.485 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:52 vm03.local ceph-mon[47106]: pgmap v309: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:52.540 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm 
daemon(s) 2026-03-10T12:23:52.541 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:23:52.541 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:23:52.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:52 vm09.local ceph-mon[55914]: pgmap v309: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:53.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:53 vm09.local ceph-mon[55914]: from='client.15744 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:53.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:53 vm09.local ceph-mon[55914]: from='client.15748 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:53.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:53 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3133851462' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:53.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:53 vm03.local ceph-mon[47106]: from='client.15744 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:53.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:53 vm03.local ceph-mon[47106]: from='client.15748 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:53.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:53 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3133851462' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:54.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:54 vm09.local ceph-mon[55914]: pgmap v310: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:54 vm03.local ceph-mon[47106]: pgmap v310: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:23:55.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:23:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:56 vm09.local ceph-mon[55914]: pgmap v311: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:56.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:56 vm03.local ceph-mon[47106]: pgmap v311: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:23:57.719 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:23:57.874 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 
2026-03-10T12:23:57.874 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 85s ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:23:57.874 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 85s ago 8m - - 2026-03-10T12:23:57.874 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 2m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:23:57.875 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (8m) 2m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:23:58.078 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:23:58.079 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:23:58.079 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:23:58.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:58 vm09.local ceph-mon[55914]: pgmap v312: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:58.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:58 vm09.local ceph-mon[55914]: from='client.15756 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:58.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:58 vm09.local ceph-mon[55914]: from='client.15760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:58.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:23:58 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3165620042' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:23:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:58 vm03.local ceph-mon[47106]: pgmap v312: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:23:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:58 vm03.local ceph-mon[47106]: from='client.15756 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:58 vm03.local ceph-mon[47106]: from='client.15760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:23:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:23:58 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3165620042' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:00 vm09.local ceph-mon[55914]: pgmap v313: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:00 vm03.local ceph-mon[47106]: pgmap v313: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:02.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:02 vm09.local ceph-mon[55914]: pgmap v314: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:02.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:02 vm03.local ceph-mon[47106]: pgmap v314: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:03.273 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:03.437 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:03.437 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 90s ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:03.437 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 90s ago 8m - - 2026-03-10T12:24:03.437 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 2m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:03.437 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (8m) 2m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:03.641 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:03.641 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:24:03.641 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:04 vm09.local ceph-mon[55914]: from='client.15768 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:04 vm09.local ceph-mon[55914]: pgmap v315: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:04 vm09.local ceph-mon[55914]: from='client.15772 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:04 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1739278706' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:04.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:04 vm03.local ceph-mon[47106]: from='client.15768 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:04.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:04 vm03.local ceph-mon[47106]: pgmap v315: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:04.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:04 vm03.local ceph-mon[47106]: from='client.15772 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:04.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:04 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1739278706' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:06.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:06 vm09.local ceph-mon[55914]: pgmap v316: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:06.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:06 vm03.local ceph-mon[47106]: pgmap v316: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:08.825 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:08.825 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:08 vm03.local ceph-mon[47106]: pgmap v317: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:08.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:08 vm09.local ceph-mon[55914]: pgmap v317: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:09.004 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:09.004 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (2m) 96s ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:09.004 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 96s ago 8m - - 2026-03-10T12:24:09.004 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 2m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:09.004 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (8m) 2m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:09.204 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:09.204 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:24:09.204 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:09.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:09 vm09.local ceph-mon[55914]: from='client.25109 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:09.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:09 vm09.local ceph-mon[55914]: 
from='client.15784 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:09.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:09 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1822835305' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:09.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:09 vm03.local ceph-mon[47106]: from='client.25109 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:09.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:09 vm03.local ceph-mon[47106]: from='client.15784 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:09.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:09 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1822835305' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:10 vm09.local ceph-mon[55914]: pgmap v318: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:24:10.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:10 vm03.local ceph-mon[47106]: pgmap v318: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:10.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:24:12.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:12 vm09.local ceph-mon[55914]: pgmap v319: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:12.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:12 vm03.local ceph-mon[47106]: pgmap v319: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:14.394 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:14.562 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:14.563 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 101s ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:14.563 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 101s ago 8m - - 2026-03-10T12:24:14.563 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:14.563 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:14.766 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:14.766 
INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:24:14.766 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:14.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:14 vm09.local ceph-mon[55914]: pgmap v320: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:14.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:14 vm03.local ceph-mon[47106]: pgmap v320: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:15.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:15 vm09.local ceph-mon[55914]: from='client.15792 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:15.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:15 vm09.local ceph-mon[55914]: from='client.15796 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:15.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:15 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/18020455' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:15.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:15 vm03.local ceph-mon[47106]: from='client.15792 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:15 vm03.local ceph-mon[47106]: from='client.15796 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:15.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:15 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/18020455' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:16.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:16 vm09.local ceph-mon[55914]: pgmap v321: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:16.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:16 vm03.local ceph-mon[47106]: pgmap v321: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:18.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:18 vm09.local ceph-mon[55914]: pgmap v322: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:18.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:18 vm03.local ceph-mon[47106]: pgmap v322: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:19.942 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:20.109 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:20.109 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 107s ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:20.109 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 107s ago 8m - - 2026-03-10T12:24:20.109 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:20.109 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:20.297 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:20.297 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:24:20.297 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:20.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:20 vm09.local ceph-mon[55914]: pgmap v323: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:20.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:20 vm09.local ceph-mon[55914]: from='client.15804 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:20.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:20 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4097267323' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:20.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:20 vm03.local ceph-mon[47106]: pgmap v323: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:20.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:20 vm03.local ceph-mon[47106]: from='client.15804 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:20.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:20 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/4097267323' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:21.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:21 vm09.local ceph-mon[55914]: from='client.15808 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:21.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:21 vm03.local ceph-mon[47106]: from='client.15808 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:22.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:22 vm09.local ceph-mon[55914]: pgmap v324: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:22.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:22 vm03.local ceph-mon[47106]: pgmap v324: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:24.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:24 vm09.local ceph-mon[55914]: pgmap v325: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:24.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:24 vm03.local ceph-mon[47106]: pgmap v325: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:25.482 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:25.639 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:25.639 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 112s ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:25.639 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 112s ago 8m - - 2026-03-10T12:24:25.639 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:25.639 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:25.849 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:24:25.850 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:25.850 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:24:25.850 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:24:26.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:26 vm03.local ceph-mon[47106]: pgmap v326: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:26.910 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:26 vm03.local ceph-mon[47106]: from='client.15816 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:26.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:26 vm03.local ceph-mon[47106]: from='client.15820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:26.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:26 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3733564135' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:26 vm09.local ceph-mon[55914]: pgmap v326: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:26 vm09.local ceph-mon[55914]: from='client.15816 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:26 vm09.local ceph-mon[55914]: from='client.15820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:27.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:26 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3733564135' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:29.070 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:28 vm03.local ceph-mon[47106]: pgmap v327: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:29.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:28 vm09.local ceph-mon[55914]: pgmap v327: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:31.031 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:31.031 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:30 vm03.local ceph-mon[47106]: pgmap v328: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:30 vm09.local ceph-mon[55914]: pgmap v328: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:31.188 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:31.189 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 118s ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:31.189 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 118s ago 8m - - 2026-03-10T12:24:31.189 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:31.189 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:31.379 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:31.379 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:24:31.379 
INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:32.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:31 vm09.local ceph-mon[55914]: from='client.15828 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:32.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:31 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/889407721' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:32.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:31 vm03.local ceph-mon[47106]: from='client.15828 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:32.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:31 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/889407721' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:33.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:32 vm09.local ceph-mon[55914]: from='client.15832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:33.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:32 vm09.local ceph-mon[55914]: pgmap v329: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:33.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:32 vm03.local ceph-mon[47106]: from='client.15832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:33.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:32 vm03.local ceph-mon[47106]: pgmap v329: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:34.126 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:33 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:24:34.126 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:33 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:24:34.126 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:33 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:24:34.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:33 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:24:34.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:33 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:24:34.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:33 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:24:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:34 vm09.local ceph-mon[55914]: pgmap v330: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' 
entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:24:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:24:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:34 vm09.local ceph-mon[55914]: pgmap v331: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 194 B/s rd, 389 B/s wr, 0 op/s 2026-03-10T12:24:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:24:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:24:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:34 vm03.local ceph-mon[47106]: pgmap v330: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:24:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:24:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:24:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:34 vm03.local ceph-mon[47106]: pgmap v331: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 194 B/s rd, 389 B/s wr, 0 op/s 2026-03-10T12:24:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:24:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:24:36.558 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:36.714 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:36.714 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 2m ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:36.714 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 8m - - 2026-03-10T12:24:36.714 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:36.714 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:36.917 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:36.918 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:24:36.918 
INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:36 vm09.local ceph-mon[55914]: pgmap v332: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 194 B/s rd, 389 B/s wr, 0 op/s 2026-03-10T12:24:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:36 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3003865016' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:37.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:36 vm03.local ceph-mon[47106]: pgmap v332: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 194 B/s rd, 389 B/s wr, 0 op/s 2026-03-10T12:24:37.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:36 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3003865016' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:38.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:37 vm09.local ceph-mon[55914]: from='client.15840 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:38.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:37 vm09.local ceph-mon[55914]: from='client.15844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:38.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:37 vm03.local ceph-mon[47106]: from='client.15840 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:38.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:37 vm03.local ceph-mon[47106]: from='client.15844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:39.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:39 vm09.local ceph-mon[55914]: pgmap v333: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 194 B/s rd, 389 B/s wr, 0 op/s 2026-03-10T12:24:39.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:38 vm03.local ceph-mon[47106]: pgmap v333: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 194 B/s rd, 389 B/s wr, 0 op/s 2026-03-10T12:24:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:24:40.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:24:41.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:41 vm09.local ceph-mon[55914]: pgmap v334: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 194 B/s rd, 389 B/s wr, 0 op/s 2026-03-10T12:24:41.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:41 vm03.local ceph-mon[47106]: pgmap v334: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 194 B/s rd, 389 B/s wr, 0 op/s 2026-03-10T12:24:42.113 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:42.274 INFO:teuthology.orchestra.run.vm03.stdout:NAME 
HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:42.274 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 2m ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:42.274 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 8m - - 2026-03-10T12:24:42.274 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:42.274 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:42.475 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:42.475 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:24:42.475 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:43 vm09.local ceph-mon[55914]: pgmap v335: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:43 vm09.local ceph-mon[55914]: from='client.15852 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:43 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/142646253' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:43.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:43 vm03.local ceph-mon[47106]: pgmap v335: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:43.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:43 vm03.local ceph-mon[47106]: from='client.15852 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:43.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:43 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/142646253' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:44.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:44 vm09.local ceph-mon[55914]: from='client.15856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:44.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:44 vm03.local ceph-mon[47106]: from='client.15856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:45 vm09.local ceph-mon[55914]: pgmap v336: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:45.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:45 vm03.local ceph-mon[47106]: pgmap v336: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:47 vm09.local ceph-mon[55914]: pgmap v337: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:47.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:47 vm03.local ceph-mon[47106]: pgmap v337: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:47.656 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:47.813 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:47.813 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 2m ago 8m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:47.813 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 8m - - 2026-03-10T12:24:47.813 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (8m) 3m ago 8m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:47.813 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 3m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:48.001 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:48.001 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:24:48.001 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:48.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:48 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1497572517' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:48.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:48 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1497572517' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:49.303 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:49 vm03.local ceph-mon[47106]: from='client.15864 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:49.303 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:49 vm03.local ceph-mon[47106]: from='client.15868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:49.303 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:49 vm03.local ceph-mon[47106]: pgmap v338: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:49 vm09.local ceph-mon[55914]: from='client.15864 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:49 vm09.local ceph-mon[55914]: from='client.15868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:49 vm09.local ceph-mon[55914]: pgmap v338: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:51.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:51 vm09.local ceph-mon[55914]: pgmap v339: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:51.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:51 vm03.local ceph-mon[47106]: pgmap v339: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:53.188 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:53.340 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:53 vm03.local ceph-mon[47106]: pgmap v340: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:53.341 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:53.341 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 2m ago 9m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:53.341 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 9m - - 2026-03-10T12:24:53.341 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (9m) 3m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:53.341 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 3m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:53.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:53 vm09.local ceph-mon[55914]: pgmap v340: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:53.524 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:53.524 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 
2026-03-10T12:24:53.524 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:54.357 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:54 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1051378475' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:54 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1051378475' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:24:55.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:55 vm09.local ceph-mon[55914]: from='client.15876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:55.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:55 vm09.local ceph-mon[55914]: from='client.15880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:55.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:55 vm09.local ceph-mon[55914]: pgmap v341: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:55.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:24:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:55 vm03.local ceph-mon[47106]: from='client.15876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:55 vm03.local ceph-mon[47106]: from='client.15880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:24:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:55 vm03.local ceph-mon[47106]: pgmap v341: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:24:56.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:56 vm03.local ceph-mon[47106]: pgmap v342: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:56.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:56 vm09.local ceph-mon[55914]: pgmap v342: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:24:58.702 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:24:58.852 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:24:58.852 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 2m ago 9m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:24:58.852 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 9m - - 2026-03-10T12:24:58.852 
INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (9m) 3m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:24:58.852 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 3m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:24:58.962 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:58 vm03.local ceph-mon[47106]: pgmap v343: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:24:59.042 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:24:59.042 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:24:59.042 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:24:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:58 vm09.local ceph-mon[55914]: pgmap v343: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:00.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:59 vm09.local ceph-mon[55914]: from='client.15888 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:00.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:59 vm09.local ceph-mon[55914]: from='client.15892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:00.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:24:59 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1915832623' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:00.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:59 vm03.local ceph-mon[47106]: from='client.15888 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:00.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:59 vm03.local ceph-mon[47106]: from='client.15892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:00.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:24:59 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1915832623' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:01.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:00 vm09.local ceph-mon[55914]: pgmap v344: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:01.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:00 vm03.local ceph-mon[47106]: pgmap v344: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:02 vm09.local ceph-mon[55914]: pgmap v345: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:03.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:02 vm03.local ceph-mon[47106]: pgmap v345: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:04.233 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:04.407 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:04.407 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 2m ago 9m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:04.407 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 9m - - 2026-03-10T12:25:04.407 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (9m) 3m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:25:04.407 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 3m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:04.607 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:04.607 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:04.607 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:05.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:04 vm09.local ceph-mon[55914]: pgmap v346: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:05.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:04 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1165702874' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:05.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:04 vm03.local ceph-mon[47106]: pgmap v346: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:05.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:04 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1165702874' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:05 vm09.local ceph-mon[55914]: from='client.15900 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:05 vm09.local ceph-mon[55914]: from='client.15904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:05 vm03.local ceph-mon[47106]: from='client.15900 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:05 vm03.local ceph-mon[47106]: from='client.15904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:07.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:07 vm09.local ceph-mon[55914]: pgmap v347: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:07.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:07 vm03.local ceph-mon[47106]: pgmap v347: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:09 vm09.local ceph-mon[55914]: pgmap v348: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:09 vm03.local ceph-mon[47106]: pgmap v348: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:09.786 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:09.944 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:09.944 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (3m) 2m ago 9m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:09.944 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 9m - - 2026-03-10T12:25:09.944 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (9m) 3m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:25:09.944 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 3m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:10.137 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:10.137 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:10.137 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:10.137 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:25:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:10 vm09.local 
ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:25:11.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:11 vm09.local ceph-mon[55914]: from='client.25185 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:11.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:11 vm09.local ceph-mon[55914]: pgmap v349: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:11.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:11 vm09.local ceph-mon[55914]: from='client.15916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:11.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:11 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3555819846' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:11.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:11 vm03.local ceph-mon[47106]: from='client.25185 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:11.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:11 vm03.local ceph-mon[47106]: pgmap v349: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:11.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:11 vm03.local ceph-mon[47106]: from='client.15916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:11.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:11 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3555819846' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:13.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:13 vm09.local ceph-mon[55914]: pgmap v350: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:13.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:13 vm03.local ceph-mon[47106]: pgmap v350: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:15.306 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:15.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:15 vm03.local ceph-mon[47106]: pgmap v351: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:15.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:15 vm09.local ceph-mon[55914]: pgmap v351: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:15.462 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:15.462 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 2m ago 9m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:15.462 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 9m - - 2026-03-10T12:25:15.462 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:25:15.462 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:15.648 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:15.648 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:15.648 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:16.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:16 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1901410055' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:16.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:16 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1901410055' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:17.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:17 vm09.local ceph-mon[55914]: from='client.25191 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:17.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:17 vm09.local ceph-mon[55914]: from='client.15928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:17.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:17 vm09.local ceph-mon[55914]: pgmap v352: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:17.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:17 vm03.local ceph-mon[47106]: from='client.25191 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:17.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:17 vm03.local ceph-mon[47106]: from='client.15928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:17.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:17 vm03.local ceph-mon[47106]: pgmap v352: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:19.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:19 vm09.local ceph-mon[55914]: pgmap v353: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:19.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:19 vm03.local ceph-mon[47106]: pgmap v353: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:20.830 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:20.982 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:20.982 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 2m ago 9m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:20.982 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 9m - - 2026-03-10T12:25:20.982 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:25:20.982 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:21.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:21 vm03.local ceph-mon[47106]: pgmap v354: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:21.179 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:21.180 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:21.180 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:21.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:21 vm09.local ceph-mon[55914]: pgmap 
v354: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:22.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:22 vm09.local ceph-mon[55914]: from='client.15936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:22.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:22 vm09.local ceph-mon[55914]: from='client.15940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:22.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:22 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/57976181' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:22.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:22 vm03.local ceph-mon[47106]: from='client.15936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:22.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:22 vm03.local ceph-mon[47106]: from='client.15940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:22.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:22 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/57976181' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:23 vm09.local ceph-mon[55914]: pgmap v355: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:23.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:23 vm03.local ceph-mon[47106]: pgmap v355: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:25.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:25 vm09.local ceph-mon[55914]: pgmap v356: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:25.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:25:25.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:25 vm03.local ceph-mon[47106]: pgmap v356: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:25.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:25:26.357 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:26.506 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:26.506 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 2m ago 9m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:26.506 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 9m - - 2026-03-10T12:25:26.506 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl 
vm09 *:8001 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:25:26.506 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:26.691 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:26.691 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:26.691 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:27.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:27 vm09.local ceph-mon[55914]: pgmap v357: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:27.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:27 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3736057326' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:27.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:27 vm03.local ceph-mon[47106]: pgmap v357: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:27.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:27 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3736057326' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:28 vm09.local ceph-mon[55914]: from='client.15948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:28 vm09.local ceph-mon[55914]: from='client.15952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:28.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:28 vm03.local ceph-mon[47106]: from='client.15948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:28.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:28 vm03.local ceph-mon[47106]: from='client.15952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:29 vm09.local ceph-mon[55914]: pgmap v358: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:29.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:29 vm03.local ceph-mon[47106]: pgmap v358: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:30.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:30 vm03.local ceph-mon[47106]: pgmap v359: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:30.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:30 vm09.local ceph-mon[55914]: pgmap v359: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:31.859 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:32.009 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM 
LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:32.009 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 2m ago 9m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:32.009 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 2m ago 9m - - 2026-03-10T12:25:32.009 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:25:32.009 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:32.198 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:32.198 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:32.198 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:33.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:32 vm09.local ceph-mon[55914]: from='client.15960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:33.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:32 vm09.local ceph-mon[55914]: pgmap v360: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:33.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:32 vm09.local ceph-mon[55914]: from='client.15964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:33.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:32 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4091230261' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:33.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:32 vm03.local ceph-mon[47106]: from='client.15960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:33.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:32 vm03.local ceph-mon[47106]: pgmap v360: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:33.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:32 vm03.local ceph-mon[47106]: from='client.15964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:33.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:32 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/4091230261' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:25:34.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:25:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:35 vm03.local ceph-mon[47106]: pgmap v361: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:25:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:25:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-10T12:25:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-10T12:25:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:25:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:25:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:25:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:25:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:35 vm09.local ceph-mon[55914]: pgmap v361: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:25:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:25:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:25:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 
2026-03-10T12:25:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-10T12:25:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:25:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:25:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:25:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:25:36.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:36 vm09.local ceph-mon[55914]: pgmap v362: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 196 B/s rd, 392 B/s wr, 0 op/s 2026-03-10T12:25:36.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:36 vm03.local ceph-mon[47106]: pgmap v362: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 196 B/s rd, 392 B/s wr, 0 op/s 2026-03-10T12:25:37.371 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:37.521 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:37.521 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 3m ago 9m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:37.521 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 3m ago 9m - - 2026-03-10T12:25:37.521 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:25:37.521 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:37.703 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:37.703 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:37.703 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:38.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:38 vm09.local ceph-mon[55914]: pgmap v363: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 196 B/s rd, 392 B/s wr, 0 op/s 2026-03-10T12:25:38.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:38 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/271309618' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:38.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:38 vm03.local ceph-mon[47106]: pgmap v363: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 196 B/s rd, 392 B/s wr, 0 op/s 2026-03-10T12:25:38.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:38 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/271309618' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:39.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:39 vm09.local ceph-mon[55914]: from='client.15972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:39.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:39 vm09.local ceph-mon[55914]: from='client.15976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:39.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:39 vm03.local ceph-mon[47106]: from='client.15972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:39.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:39 vm03.local ceph-mon[47106]: from='client.15976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:40.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:40 vm03.local ceph-mon[47106]: pgmap v364: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 196 B/s rd, 392 B/s wr, 0 op/s 2026-03-10T12:25:40.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:25:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:40 vm09.local ceph-mon[55914]: pgmap v364: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 196 B/s rd, 392 B/s wr, 0 op/s 2026-03-10T12:25:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:25:42.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:42 vm09.local ceph-mon[55914]: pgmap v365: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:42.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:42 vm03.local ceph-mon[47106]: pgmap v365: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:42.876 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:43.029 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:43.029 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 3m ago 9m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:43.029 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 3m ago 9m - - 2026-03-10T12:25:43.029 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 
654f31e6858e 951819664aa6 2026-03-10T12:25:43.029 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:43.221 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:43.221 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:43.221 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:44.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:44 vm09.local ceph-mon[55914]: pgmap v366: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:44.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:44 vm09.local ceph-mon[55914]: from='client.15984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:44.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:44 vm09.local ceph-mon[55914]: from='client.15988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:44.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:44 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/935012260' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:44.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:44 vm03.local ceph-mon[47106]: pgmap v366: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:44.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:44 vm03.local ceph-mon[47106]: from='client.15984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:44.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:44 vm03.local ceph-mon[47106]: from='client.15988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:44.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:44 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/935012260' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:46 vm09.local ceph-mon[55914]: pgmap v367: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:46.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:46 vm03.local ceph-mon[47106]: pgmap v367: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:48.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:48 vm09.local ceph-mon[55914]: pgmap v368: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:48.400 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:48.400 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:48 vm03.local ceph-mon[47106]: pgmap v368: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:48.551 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:48.551 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 3m ago 10m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:48.551 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 3m ago 9m - - 2026-03-10T12:25:48.551 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (9m) 4m ago 9m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:25:48.551 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 4m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:48.740 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:48.741 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:48.741 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:49 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2144394856' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:49.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:49 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/2144394856' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:50 vm09.local ceph-mon[55914]: pgmap v369: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:50 vm09.local ceph-mon[55914]: from='client.15996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:50 vm09.local ceph-mon[55914]: from='client.16000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:50 vm03.local ceph-mon[47106]: pgmap v369: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:50 vm03.local ceph-mon[47106]: from='client.15996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:50.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:50 vm03.local ceph-mon[47106]: from='client.16000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:52.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:52 vm09.local ceph-mon[55914]: pgmap v370: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:52.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:52 vm03.local ceph-mon[47106]: pgmap v370: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:53.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:53 vm09.local ceph-mon[55914]: pgmap v371: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:53.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:53 vm03.local ceph-mon[47106]: pgmap v371: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:53.913 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:54.071 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:54.071 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 3m ago 10m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:54.071 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 3m ago 10m - - 2026-03-10T12:25:54.071 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (10m) 4m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:25:54.071 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 4m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:54.168 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:54 vm03.local ceph-mon[47106]: from='client.16008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 
2026-03-10T12:25:54.168 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:54 vm03.local ceph-mon[47106]: from='client.16012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:54.258 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:54.258 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:54.258 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:54 vm09.local ceph-mon[55914]: from='client.16008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:54 vm09.local ceph-mon[55914]: from='client.16012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:25:55.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:55 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1020126203' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:55.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:55 vm09.local ceph-mon[55914]: pgmap v372: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:55.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:25:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:55 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1020126203' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:25:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:55 vm03.local ceph-mon[47106]: pgmap v372: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:25:57.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:57 vm09.local ceph-mon[55914]: pgmap v373: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:57.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:57 vm03.local ceph-mon[47106]: pgmap v373: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:25:59.432 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:25:59.581 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:25:59.581 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 3m ago 10m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:25:59.581 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 3m ago 10m - - 2026-03-10T12:25:59.581 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (10m) 4m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:25:59.582 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 4m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:25:59.770 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:25:59 vm03.local ceph-mon[47106]: pgmap v374: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:25:59.770 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:25:59.770 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:25:59.770 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:25:59.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:25:59 vm09.local ceph-mon[55914]: pgmap v374: 129 pgs: 129 active+clean; 454 KiB data, 243 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:26:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:00 vm09.local ceph-mon[55914]: from='client.16020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:00 vm09.local ceph-mon[55914]: from='client.16024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:00 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/4013029040' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:00 vm03.local ceph-mon[47106]: from='client.16020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:00 vm03.local ceph-mon[47106]: from='client.16024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:00 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/4013029040' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:01.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:01 vm09.local ceph-mon[55914]: pgmap v375: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 41 KiB/s rd, 341 B/s wr, 68 op/s 2026-03-10T12:26:01.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:01 vm03.local ceph-mon[47106]: pgmap v375: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 41 KiB/s rd, 341 B/s wr, 68 op/s 2026-03-10T12:26:03.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:03 vm09.local ceph-mon[55914]: pgmap v376: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 341 B/s wr, 91 op/s 2026-03-10T12:26:03.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:03 vm03.local ceph-mon[47106]: pgmap v376: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 341 B/s wr, 91 op/s 2026-03-10T12:26:04.942 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:26:05.091 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:05.091 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 3m ago 10m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:05.091 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 3m ago 10m - - 2026-03-10T12:26:05.091 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (10m) 4m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:26:05.091 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 4m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:05.299 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:26:05.299 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:26:05.299 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:26:05.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:05 vm03.local ceph-mon[47106]: pgmap v377: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 341 B/s wr, 91 op/s 2026-03-10T12:26:05.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:05 vm03.local ceph-mon[47106]: from='client.16032 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:05.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:05 vm03.local 
ceph-mon[47106]: from='client.16036 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:05.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:05 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3308768134' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:05.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:05 vm09.local ceph-mon[55914]: pgmap v377: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 341 B/s wr, 91 op/s 2026-03-10T12:26:05.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:05 vm09.local ceph-mon[55914]: from='client.16032 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:05.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:05 vm09.local ceph-mon[55914]: from='client.16036 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:05.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:05 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3308768134' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:07.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:07 vm09.local ceph-mon[55914]: pgmap v378: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 341 B/s wr, 91 op/s 2026-03-10T12:26:07.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:07 vm03.local ceph-mon[47106]: pgmap v378: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 341 B/s wr, 91 op/s 2026-03-10T12:26:09.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:09 vm09.local ceph-mon[55914]: pgmap v379: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 341 B/s wr, 91 op/s 2026-03-10T12:26:09.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:09 vm03.local ceph-mon[47106]: pgmap v379: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 341 B/s wr, 91 op/s 2026-03-10T12:26:10.471 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:26:10.622 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:10.622 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (4m) 3m ago 10m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:10.622 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 3m ago 10m - - 2026-03-10T12:26:10.622 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (10m) 4m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:26:10.622 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 4m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:10.803 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:26:10.803 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:26:10.803 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:26:10.803 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:26:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:26:11.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:11 vm09.local ceph-mon[55914]: pgmap v380: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 341 B/s wr, 91 op/s 2026-03-10T12:26:11.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:11 vm09.local ceph-mon[55914]: from='client.16044 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:11.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:11 vm09.local ceph-mon[55914]: from='client.25265 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:11.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:11 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3652113005' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:11.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:11 vm03.local ceph-mon[47106]: pgmap v380: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 341 B/s wr, 91 op/s 2026-03-10T12:26:11.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:11 vm03.local ceph-mon[47106]: from='client.16044 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:11.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:11 vm03.local ceph-mon[47106]: from='client.25265 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:11.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:11 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3652113005' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:13.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:13 vm09.local ceph-mon[55914]: pgmap v381: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 22 op/s 2026-03-10T12:26:13.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:13 vm03.local ceph-mon[47106]: pgmap v381: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 22 op/s 2026-03-10T12:26:15.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:15 vm09.local ceph-mon[55914]: pgmap v382: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:26:15.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:15 vm03.local ceph-mon[47106]: pgmap v382: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:26:15.985 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to stop 2026-03-10T12:26:16.135 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:16.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 3m ago 10m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:16.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 3m ago 10m - - 2026-03-10T12:26:16.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (10m) 5m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:26:16.135 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 5m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:16.330 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:26:16.330 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:26:16.330 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:26:16.525 INFO:teuthology.orchestra.run.vm03.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T12:26:16.525 INFO:teuthology.orchestra.run.vm03.stderr: Dload Upload Total Spent Left Speed 2026-03-10T12:26:16.529 INFO:teuthology.orchestra.run.vm03.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-03-10T12:26:16.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:16 vm03.local ceph-mon[47106]: from='client.16056 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:16.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:16 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1544107871' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:16.689 INFO:teuthology.orchestra.run.vm03.stdout:anonymousScheduled to start rgw.foo.vm03.yhnrdc on host 'vm03' 2026-03-10T12:26:16.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:16 vm09.local ceph-mon[55914]: from='client.16056 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:16.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:16 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1544107871' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:16.891 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm03.yhnrdc to start 2026-03-10T12:26:17.059 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:17.059 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 3m ago 10m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:17.059 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 error 3m ago 10m - - 2026-03-10T12:26:17.059 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (10m) 5m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:26:17.059 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 5m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:17.267 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:26:17.267 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:26:17.267 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm03.yhnrdc on vm03 is in error state 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='client.25275 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: pgmap v383: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='client.16068 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm03.yhnrdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: Schedule start daemon rgw.foo.vm03.yhnrdc 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: 
from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: pgmap v384: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='client.16072 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='client.16076 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2640220764' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:18.043 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:17 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='client.25275 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: pgmap v383: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='client.16068 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm03.yhnrdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: Schedule start daemon rgw.foo.vm03.yhnrdc 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='mgr.14217 
192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: pgmap v384: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='client.16072 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='client.16076 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/2640220764' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:17 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:26:20.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:19 vm03.local ceph-mon[47106]: pgmap v385: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 198 B/s wr, 15 op/s 2026-03-10T12:26:20.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:20.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:20.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:26:20.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:26:20.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:19 vm03.local ceph-mon[47106]: pgmap v386: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 239 B/s wr, 18 op/s 2026-03-10T12:26:20.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:20.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:19 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:26:20.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:19 vm09.local ceph-mon[55914]: pgmap v385: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 198 B/s wr, 15 op/s 2026-03-10T12:26:20.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:20.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:20.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:26:20.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: 
dispatch 2026-03-10T12:26:20.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:19 vm09.local ceph-mon[55914]: pgmap v386: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 239 B/s wr, 18 op/s 2026-03-10T12:26:20.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:20.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:19 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:26:21.390 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:20 vm09.local ceph-mon[55914]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T12:26:21.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:20 vm09.local ceph-mon[55914]: Cluster is now healthy 2026-03-10T12:26:21.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:20 vm03.local ceph-mon[47106]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T12:26:21.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:20 vm03.local ceph-mon[47106]: Cluster is now healthy 2026-03-10T12:26:22.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:21 vm09.local ceph-mon[55914]: pgmap v387: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 64 KiB/s rd, 239 B/s wr, 99 op/s 2026-03-10T12:26:22.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:21 vm03.local ceph-mon[47106]: pgmap v387: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 64 KiB/s rd, 239 B/s wr, 99 op/s 2026-03-10T12:26:22.454 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5s) 3s ago 10m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:26:22.643 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled to stop rgw.foo.vm09.jddmdl on host 'vm09' 2026-03-10T12:26:22.828 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:26:22.979 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:22.979 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 4s ago 10m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:22.979 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5s) 4s ago 10m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:26:22.979 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (10m) 5m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:26:22.979 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 5m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:23.171 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='client.25299 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='client.16096 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": 
"stop", "name": "rgw.foo.vm09.jddmdl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: Schedule stop daemon rgw.foo.vm09.jddmdl 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='client.16100 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: pgmap v388: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 239 B/s wr, 115 op/s 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='client.16104 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:23 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/457756664' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='client.25299 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='client.16096 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm09.jddmdl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: Schedule stop daemon rgw.foo.vm09.jddmdl 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='client.16100 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: pgmap v388: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 239 B/s wr, 115 op/s 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='client.16104 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:23.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:23 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/457756664' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:26.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:25 vm09.local ceph-mon[55914]: pgmap v389: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 239 B/s wr, 115 op/s 2026-03-10T12:26:26.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:26.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:26:26.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:25 vm03.local ceph-mon[47106]: pgmap v389: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 239 B/s wr, 115 op/s 2026-03-10T12:26:26.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:26.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:26:28.339 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:27 vm03.local ceph-mon[47106]: pgmap v390: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 62 KiB/s rd, 200 B/s wr, 96 op/s 2026-03-10T12:26:28.339 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:26:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:27 vm09.local ceph-mon[55914]: pgmap v390: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 62 KiB/s rd, 200 B/s wr, 96 op/s 2026-03-10T12:26:28.490 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:28.490 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 9s ago 10m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:28.490 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (11s) 9s ago 10m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:26:28.490 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (10m) 5m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:26:28.490 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 5m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:28.671 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:26:29.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:28 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3850787833' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:29.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:28 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3850787833' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:30.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:29 vm03.local ceph-mon[47106]: from='client.16112 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:29 vm03.local ceph-mon[47106]: from='client.16116 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:29 vm03.local ceph-mon[47106]: pgmap v391: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 200 B/s wr, 81 op/s 2026-03-10T12:26:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:29 vm09.local ceph-mon[55914]: from='client.16112 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:29 vm09.local ceph-mon[55914]: from='client.16116 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:30.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:29 vm09.local ceph-mon[55914]: pgmap v391: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 200 B/s wr, 81 op/s 2026-03-10T12:26:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:31 vm09.local ceph-mon[55914]: pgmap v392: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 45 KiB/s rd, 341 B/s wr, 69 op/s 2026-03-10T12:26:32.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:31 vm03.local ceph-mon[47106]: pgmap v392: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 45 KiB/s rd, 341 B/s wr, 69 op/s 2026-03-10T12:26:33.850 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:26:34.010 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:34.010 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 15s ago 10m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:34.010 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (16s) 15s ago 10m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:26:34.010 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (10m) 5m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 951819664aa6 2026-03-10T12:26:34.010 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 5m ago 10m 112M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:34.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:34 vm03.local ceph-mon[47106]: pgmap v393: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 7.7 KiB/s rd, 341 B/s wr, 11 op/s 2026-03-10T12:26:34.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:34.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 
2026-03-10T12:26:34.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:26:34.210 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:26:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:34 vm09.local ceph-mon[55914]: pgmap v393: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 7.7 KiB/s rd, 341 B/s wr, 11 op/s 2026-03-10T12:26:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:26:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:35 vm09.local ceph-mon[55914]: from='client.16124 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:35 vm09.local ceph-mon[55914]: from='client.16128 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:26:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:26:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:26:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:35 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/2994154964' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:35.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:35 vm03.local ceph-mon[47106]: from='client.16124 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:35.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:35 vm03.local ceph-mon[47106]: from='client.16128 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:35.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:35.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:35.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:26:35.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:26:35.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:35.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:26:35.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:35 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/2994154964' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:36.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:36 vm09.local ceph-mon[55914]: pgmap v394: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 181 B/s rd, 363 B/s wr, 0 op/s 2026-03-10T12:26:36.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:36 vm09.local ceph-mon[55914]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T12:26:36.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:36 vm03.local ceph-mon[47106]: pgmap v394: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 181 B/s rd, 363 B/s wr, 0 op/s 2026-03-10T12:26:36.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:36 vm03.local ceph-mon[47106]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T12:26:38.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:38 vm09.local ceph-mon[55914]: pgmap v395: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 90 B/s rd, 181 B/s wr, 0 op/s 2026-03-10T12:26:38.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:38 vm03.local ceph-mon[47106]: pgmap v395: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 90 B/s rd, 181 B/s wr, 0 op/s 2026-03-10T12:26:39.390 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:26:39.544 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:39.544 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 20s ago 10m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:39.544 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (22s) 20s ago 10m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:26:39.544 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 5s ago 10m - - 2026-03-10T12:26:39.544 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 5s ago 10m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:39.733 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:26:39.733 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:26:39.733 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:26:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:40 vm09.local ceph-mon[55914]: pgmap v396: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 90 B/s rd, 181 B/s wr, 0 op/s 2026-03-10T12:26:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:40 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/573688505' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:26:40.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:40 vm03.local ceph-mon[47106]: pgmap v396: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 90 B/s rd, 181 B/s wr, 0 op/s 2026-03-10T12:26:40.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:40 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/573688505' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:40.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:26:40.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:26:41.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:41 vm09.local ceph-mon[55914]: from='client.16136 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:41.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:41 vm09.local ceph-mon[55914]: from='client.16140 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:41.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:41 vm03.local ceph-mon[47106]: from='client.16136 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:41.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:41 vm03.local ceph-mon[47106]: from='client.16140 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:42.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:42 vm09.local ceph-mon[55914]: pgmap v397: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 90 B/s rd, 181 B/s wr, 0 op/s 2026-03-10T12:26:42.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:42 vm03.local ceph-mon[47106]: pgmap v397: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 90 B/s rd, 181 B/s wr, 0 op/s 2026-03-10T12:26:44.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:44 vm09.local ceph-mon[55914]: pgmap v398: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:26:44.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:44 vm03.local ceph-mon[47106]: pgmap v398: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:26:44.905 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:26:45.055 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:45.055 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr 
vm03 *:8000 running (5m) 26s ago 10m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:45.055 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (27s) 26s ago 10m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:26:45.055 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 10s ago 10m - - 2026-03-10T12:26:45.055 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (10m) 10s ago 10m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:45.244 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:26:45.244 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:26:45.244 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:26:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:46 vm09.local ceph-mon[55914]: pgmap v399: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:26:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:46 vm09.local ceph-mon[55914]: from='client.16148 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:46 vm09.local ceph-mon[55914]: from='client.16152 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:46.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:46 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/876885492' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:46.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:46 vm03.local ceph-mon[47106]: pgmap v399: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:26:46.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:46 vm03.local ceph-mon[47106]: from='client.16148 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:46.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:46 vm03.local ceph-mon[47106]: from='client.16152 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:46.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:46 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/876885492' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:47.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:47 vm03.local ceph-mon[47106]: pgmap v400: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:26:47.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:47 vm09.local ceph-mon[55914]: pgmap v400: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:26:49.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:49 vm09.local ceph-mon[55914]: pgmap v401: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:26:49.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:49 vm03.local ceph-mon[47106]: pgmap v401: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:26:50.426 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:26:50.584 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:50.584 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 31s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:50.584 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (33s) 31s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:26:50.584 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 16s ago 11m - - 2026-03-10T12:26:50.584 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 16s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:50.795 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:26:50.795 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:26:50.795 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:26:51.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:51 vm03.local ceph-mon[47106]: pgmap v402: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:26:51.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:51 vm03.local ceph-mon[47106]: from='client.16160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:51.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:51 vm03.local ceph-mon[47106]: from='client.16164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:51.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:51 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/676579697' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:51.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:51 vm09.local ceph-mon[55914]: pgmap v402: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:26:51.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:51 vm09.local ceph-mon[55914]: from='client.16160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:51.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:51 vm09.local ceph-mon[55914]: from='client.16164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:51.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:51 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/676579697' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:53.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:53 vm03.local ceph-mon[47106]: pgmap v403: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:26:53.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:53 vm09.local ceph-mon[55914]: pgmap v403: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:26:55.707 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:55 vm03.local ceph-mon[47106]: pgmap v404: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:26:55.707 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:26:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:55 vm09.local ceph-mon[55914]: pgmap v404: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:26:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:26:55.964 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:26:56.111 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:26:56.111 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 37s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:26:56.111 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (38s) 37s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:26:56.111 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 21s ago 11m - - 2026-03-10T12:26:56.111 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 21s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:26:56.292 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:26:56.292 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] 
CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:26:56.292 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:26:56.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:56 vm03.local ceph-mon[47106]: from='client.16172 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:56.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:56 vm03.local ceph-mon[47106]: from='client.16176 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:56.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:56 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/939422952' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:56 vm09.local ceph-mon[55914]: from='client.16172 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:56 vm09.local ceph-mon[55914]: from='client.16176 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:26:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:56 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/939422952' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:26:57.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:57 vm09.local ceph-mon[55914]: pgmap v405: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:26:57.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:57 vm03.local ceph-mon[47106]: pgmap v405: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:26:59.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:26:59 vm09.local ceph-mon[55914]: pgmap v406: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:26:59.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:26:59 vm03.local ceph-mon[47106]: pgmap v406: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:01.469 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:01.621 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:01.621 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 42s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:01.621 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (44s) 42s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:01.621 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 27s ago 11m - - 2026-03-10T12:27:01.621 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 27s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:01.810 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:01 vm03.local ceph-mon[47106]: pgmap v407: 129 pgs: 129 active+clean; 454 KiB 
data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:01.810 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:01.810 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:27:01.810 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:01.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:01 vm09.local ceph-mon[55914]: pgmap v407: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:02.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:02 vm09.local ceph-mon[55914]: from='client.16184 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:02.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:02 vm09.local ceph-mon[55914]: from='client.16188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:02.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:02 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3210127358' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:02.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:02 vm03.local ceph-mon[47106]: from='client.16184 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:02.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:02 vm03.local ceph-mon[47106]: from='client.16188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:02.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:02 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3210127358' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:03.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:03 vm09.local ceph-mon[55914]: pgmap v408: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:03.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:03 vm03.local ceph-mon[47106]: pgmap v408: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:05.824 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:05 vm03.local ceph-mon[47106]: pgmap v409: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:05.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:05 vm09.local ceph-mon[55914]: pgmap v409: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:06.984 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:07.134 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:07.134 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 48s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:07.134 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (49s) 48s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:07.134 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 32s ago 11m - - 2026-03-10T12:27:07.134 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 32s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:07.336 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:07.336 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:27:07.336 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:07.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:07 vm03.local ceph-mon[47106]: pgmap v410: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:07.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:07 vm03.local ceph-mon[47106]: from='client.16196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:07.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:07 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2524117645' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:07.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:07 vm09.local ceph-mon[55914]: pgmap v410: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:07.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:07 vm09.local ceph-mon[55914]: from='client.16196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:07.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:07 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/2524117645' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:08.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:08 vm09.local ceph-mon[55914]: from='client.16200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:08.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:08 vm03.local ceph-mon[47106]: from='client.16200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:09.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:09 vm09.local ceph-mon[55914]: pgmap v411: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:09.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:09 vm03.local ceph-mon[47106]: pgmap v411: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:10.884 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:27:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:27:11.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:11 vm09.local ceph-mon[55914]: pgmap v412: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:11.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:11 vm03.local ceph-mon[47106]: pgmap v412: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:12.511 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:12.666 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:12.666 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (5m) 53s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:12.666 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (55s) 53s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:12.666 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 38s ago 11m - - 2026-03-10T12:27:12.666 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 38s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:12.861 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:12.861 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:27:12.861 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:13.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:13 vm09.local ceph-mon[55914]: pgmap v413: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:27:13.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:13 
vm09.local ceph-mon[55914]: from='client.16208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:13.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:13 vm09.local ceph-mon[55914]: from='client.25391 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:13.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:13 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/469828974' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:13.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:13 vm03.local ceph-mon[47106]: pgmap v413: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:27:13.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:13 vm03.local ceph-mon[47106]: from='client.16208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:13.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:13 vm03.local ceph-mon[47106]: from='client.25391 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:13.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:13 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/469828974' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:15.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:15 vm09.local ceph-mon[55914]: pgmap v414: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:27:15.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:15 vm03.local ceph-mon[47106]: pgmap v414: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:27:17.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:17 vm09.local ceph-mon[55914]: pgmap v415: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:17.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:17 vm03.local ceph-mon[47106]: pgmap v415: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:18.034 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:18.188 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:18.188 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (6m) 59s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:18.188 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (60s) 59s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:18.188 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 44s ago 11m - - 2026-03-10T12:27:18.188 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 44s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:18.383 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:18.383 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:27:18.383 
INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:18.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:18 vm03.local ceph-mon[47106]: from='client.16220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:18.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:18 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1970909485' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:18.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:18 vm09.local ceph-mon[55914]: from='client.16220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:18.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:18 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1970909485' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:19 vm09.local ceph-mon[55914]: from='client.16224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:19 vm09.local ceph-mon[55914]: pgmap v416: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:19.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:19 vm03.local ceph-mon[47106]: from='client.16224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:19.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:19 vm03.local ceph-mon[47106]: pgmap v416: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:22.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:21 vm09.local ceph-mon[55914]: pgmap v417: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:22.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:21 vm03.local ceph-mon[47106]: pgmap v417: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:23.550 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:23.695 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:23.696 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (6m) 64s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:23.696 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (66s) 64s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:23.696 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 49s ago 11m - - 2026-03-10T12:27:23.696 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 49s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:23.878 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:23.878 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 
2026-03-10T12:27:23.878 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:23.878 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:23 vm03.local ceph-mon[47106]: pgmap v418: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:24.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:23 vm09.local ceph-mon[55914]: pgmap v418: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:24 vm09.local ceph-mon[55914]: from='client.16232 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:24 vm09.local ceph-mon[55914]: from='client.16236 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:25.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:24 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2217613627' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:25.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:24 vm03.local ceph-mon[47106]: from='client.16232 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:25.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:24 vm03.local ceph-mon[47106]: from='client.16236 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:25.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:24 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/2217613627' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:26.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:25 vm03.local ceph-mon[47106]: pgmap v419: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:26.067 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:27:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:25 vm09.local ceph-mon[55914]: pgmap v419: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:26.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:27:28.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:27 vm09.local ceph-mon[55914]: pgmap v420: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:28.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:27 vm03.local ceph-mon[47106]: pgmap v420: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:29.050 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:29.200 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:29.200 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (6m) 70s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:29.200 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (71s) 70s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:29.200 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 55s ago 11m - - 2026-03-10T12:27:29.200 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 55s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:29.381 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:29.381 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:27:29.381 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:29 vm09.local ceph-mon[55914]: pgmap v421: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:29 vm09.local ceph-mon[55914]: from='client.16244 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:29 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/892816681' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:29 vm03.local ceph-mon[47106]: pgmap v421: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:29 vm03.local ceph-mon[47106]: from='client.16244 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:29 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/892816681' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:31.123 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:30 vm03.local ceph-mon[47106]: from='client.16248 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:30 vm09.local ceph-mon[55914]: from='client.16248 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:32.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:31 vm09.local ceph-mon[55914]: pgmap v422: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:32.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:31 vm03.local ceph-mon[47106]: pgmap v422: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:34.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:33 vm09.local ceph-mon[55914]: pgmap v423: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:34.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:33 vm03.local ceph-mon[47106]: pgmap v423: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:34.576 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:34.738 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:34.738 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (6m) 75s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:34.738 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (77s) 75s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:34.738 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 60s ago 11m - - 2026-03-10T12:27:34.738 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 60s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:34.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:27:34.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": 
"config generate-minimal-conf"}]: dispatch 2026-03-10T12:27:34.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:27:34.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:27:34.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:27:34.940 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:34.940 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:27:34.940 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:27:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:27:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:27:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:27:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:27:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:35 vm09.local ceph-mon[55914]: pgmap v424: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:35 vm09.local ceph-mon[55914]: pgmap v425: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:27:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:35 vm09.local ceph-mon[55914]: from='client.16256 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:35 vm09.local ceph-mon[55914]: from='client.16260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:35 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/785922902' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:36.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:35 vm03.local ceph-mon[47106]: pgmap v424: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:36.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:35 vm03.local ceph-mon[47106]: pgmap v425: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:27:36.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:35 vm03.local ceph-mon[47106]: from='client.16256 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:36.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:35 vm03.local ceph-mon[47106]: from='client.16260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:36.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:35 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/785922902' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:38.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:37 vm09.local ceph-mon[55914]: pgmap v426: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:27:38.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:37 vm03.local ceph-mon[47106]: pgmap v426: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:27:40.124 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:39 vm03.local ceph-mon[47106]: pgmap v427: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:27:40.124 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:40.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:39 vm09.local ceph-mon[55914]: pgmap v427: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:27:40.280 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:40.280 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (6m) 81s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:40.280 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (82s) 81s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:40.280 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 66s ago 11m - - 2026-03-10T12:27:40.280 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 66s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:40.473 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:40.473 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:27:40.473 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:40 vm09.local 
ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:27:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:40 vm09.local ceph-mon[55914]: from='client.16268 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:40 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3853153478' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:41.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:27:41.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:40 vm03.local ceph-mon[47106]: from='client.16268 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:41.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:40 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3853153478' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:42.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:41 vm09.local ceph-mon[55914]: from='client.16272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:42.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:41 vm09.local ceph-mon[55914]: pgmap v428: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:27:42.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:41 vm03.local ceph-mon[47106]: from='client.16272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:42.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:41 vm03.local ceph-mon[47106]: pgmap v428: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:27:44.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:43 vm09.local ceph-mon[55914]: pgmap v429: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:27:44.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:43 vm03.local ceph-mon[47106]: pgmap v429: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:27:45.659 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:45.820 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:45.820 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (6m) 86s ago 11m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:45.820 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (88s) 86s ago 11m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:45.820 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 71s ago 11m - - 2026-03-10T12:27:45.820 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (11m) 71s ago 11m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:45.910 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:45 vm03.local ceph-mon[47106]: pgmap v430: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:27:46.016 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:46.016 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:27:46.016 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:46.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:45 vm09.local ceph-mon[55914]: pgmap v430: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:27:47.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:46 vm09.local ceph-mon[55914]: from='client.16280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:47.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:46 vm09.local ceph-mon[55914]: from='client.16284 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:47.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:46 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4154455282' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:47.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:46 vm03.local ceph-mon[47106]: from='client.16280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:47.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:46 vm03.local ceph-mon[47106]: from='client.16284 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:47.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:46 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/4154455282' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:48.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:47 vm09.local ceph-mon[55914]: pgmap v431: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:48.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:47 vm03.local ceph-mon[47106]: pgmap v431: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:50.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:49 vm09.local ceph-mon[55914]: pgmap v432: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:50.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:49 vm03.local ceph-mon[47106]: pgmap v432: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:27:51.188 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:51.344 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:51.344 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (6m) 92s ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:51.344 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (94s) 92s ago 12m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:51.344 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 77s ago 12m - - 2026-03-10T12:27:51.344 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 77s ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:51.561 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:51.561 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:27:51.561 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:52.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:51 vm09.local ceph-mon[55914]: pgmap v433: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:52.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:51 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3153351554' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:52.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:51 vm03.local ceph-mon[47106]: pgmap v433: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:52.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:51 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3153351554' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:53.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:52 vm09.local ceph-mon[55914]: from='client.16292 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:53.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:52 vm09.local ceph-mon[55914]: from='client.16296 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:53.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:52 vm03.local ceph-mon[47106]: from='client.16292 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:53.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:52 vm03.local ceph-mon[47106]: from='client.16296 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:54.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:53 vm09.local ceph-mon[55914]: pgmap v434: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:54.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:53 vm03.local ceph-mon[47106]: pgmap v434: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:55 vm09.local ceph-mon[55914]: pgmap v435: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:27:56.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:55 vm03.local ceph-mon[47106]: pgmap v435: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:56.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:27:56.739 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:27:56.896 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:27:56.896 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (6m) 97s ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:27:56.896 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (99s) 97s ago 12m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:27:56.896 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 82s ago 12m - - 2026-03-10T12:27:56.896 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 82s ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:27:57.089 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:27:57.089 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] 
CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:27:57.089 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:27:58.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:57 vm03.local ceph-mon[47106]: pgmap v436: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:58.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:57 vm03.local ceph-mon[47106]: from='client.16304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:58.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:57 vm03.local ceph-mon[47106]: from='client.16308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:58.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:57 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/517788600' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:27:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:57 vm09.local ceph-mon[55914]: pgmap v436: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:27:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:57 vm09.local ceph-mon[55914]: from='client.16304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:57 vm09.local ceph-mon[55914]: from='client.16308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:27:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:57 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/517788600' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:00.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:27:59 vm03.local ceph-mon[47106]: pgmap v437: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:00.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:27:59 vm09.local ceph-mon[55914]: pgmap v437: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:02.278 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:01 vm03.local ceph-mon[47106]: pgmap v438: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:02.279 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:01 vm09.local ceph-mon[55914]: pgmap v438: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:02.443 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:28:02.444 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (6m) 103s ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:02.444 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (105s) 103s ago 12m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:28:02.444 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 88s ago 12m - - 2026-03-10T12:28:02.444 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 88s ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:02.646 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:02.646 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:02.646 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:02 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3433481792' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:03.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:02 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3433481792' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:04.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:03 vm09.local ceph-mon[55914]: from='client.16316 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:04.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:03 vm09.local ceph-mon[55914]: from='client.16320 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:04.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:03 vm09.local ceph-mon[55914]: pgmap v439: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:04.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:03 vm03.local ceph-mon[47106]: from='client.16316 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:04.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:03 vm03.local ceph-mon[47106]: from='client.16320 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:04.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:03 vm03.local ceph-mon[47106]: pgmap v439: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:05 vm09.local ceph-mon[55914]: pgmap v440: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:06.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:05 vm03.local ceph-mon[47106]: pgmap v440: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:07.836 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:07.997 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:28:07.997 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (6m) 109s ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:07.997 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (110s) 109s ago 12m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:28:07.997 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 93s ago 12m - - 2026-03-10T12:28:07.997 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 93s ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:08.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:07 vm03.local ceph-mon[47106]: pgmap v441: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:08.195 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:08.195 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:08.195 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:08.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:07 vm09.local 
ceph-mon[55914]: pgmap v441: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:08 vm09.local ceph-mon[55914]: from='client.16328 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:08 vm09.local ceph-mon[55914]: from='client.16332 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:08 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2923805579' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:09.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:08 vm03.local ceph-mon[47106]: from='client.16328 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:08 vm03.local ceph-mon[47106]: from='client.16332 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:08 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2923805579' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:09 vm09.local ceph-mon[55914]: pgmap v442: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:28:10.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:09 vm03.local ceph-mon[47106]: pgmap v442: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:10.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:28:12.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:11 vm09.local ceph-mon[55914]: pgmap v443: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:12.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:11 vm03.local ceph-mon[47106]: pgmap v443: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:13.373 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:13.524 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:28:13.525 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 114s ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:13.525 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (116s) 114s ago 12m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e 
c04249fd4424 2026-03-10T12:28:13.525 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 99s ago 12m - - 2026-03-10T12:28:13.525 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 99s ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:13.712 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:13.712 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:13.712 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:14.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:13 vm03.local ceph-mon[47106]: pgmap v444: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:28:14.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:13 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/373825438' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:14.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:13 vm09.local ceph-mon[55914]: pgmap v444: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:28:14.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:13 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/373825438' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:15.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:14 vm09.local ceph-mon[55914]: from='client.16340 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:15.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:14 vm09.local ceph-mon[55914]: from='client.16344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:15.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:14 vm03.local ceph-mon[47106]: from='client.16340 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:15.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:14 vm03.local ceph-mon[47106]: from='client.16344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:16.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:15 vm09.local ceph-mon[55914]: pgmap v445: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:28:16.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:15 vm03.local ceph-mon[47106]: pgmap v445: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:28:18.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:17 vm09.local ceph-mon[55914]: pgmap v446: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:18.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:17 vm03.local ceph-mon[47106]: pgmap v446: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:18.892 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:19.046 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID 
CONTAINER ID 2026-03-10T12:28:19.046 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 2m ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:19.046 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 12m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:28:19.046 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 104s ago 12m - - 2026-03-10T12:28:19.046 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 104s ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:19.236 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:19.236 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:19.236 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:20.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:20 vm09.local ceph-mon[55914]: pgmap v447: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:20.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:20 vm09.local ceph-mon[55914]: from='client.16352 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:20.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:20 vm09.local ceph-mon[55914]: from='client.16356 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:20.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:20 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/688885777' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:20.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:20 vm03.local ceph-mon[47106]: pgmap v447: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:20.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:20 vm03.local ceph-mon[47106]: from='client.16352 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:20.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:20 vm03.local ceph-mon[47106]: from='client.16356 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:20.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:20 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/688885777' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:21.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:21 vm09.local ceph-mon[55914]: pgmap v448: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:21.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:21 vm03.local ceph-mon[47106]: pgmap v448: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:23 vm09.local ceph-mon[55914]: pgmap v449: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:23.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:23 vm03.local ceph-mon[47106]: pgmap v449: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:24.414 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:24.571 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:28:24.571 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 2m ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:24.571 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 12m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:28:24.571 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 110s ago 12m - - 2026-03-10T12:28:24.571 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 110s ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:24.778 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:24.778 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:24.778 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:25.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:25 vm09.local ceph-mon[55914]: from='client.16364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:25.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:25 vm09.local ceph-mon[55914]: pgmap v450: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:25.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:25 vm09.local ceph-mon[55914]: from='client.16368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:25.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:25 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/3786362133' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:25.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:28:25.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:25 vm03.local ceph-mon[47106]: from='client.16364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:25.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:25 vm03.local ceph-mon[47106]: pgmap v450: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:25.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:25 vm03.local ceph-mon[47106]: from='client.16368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:25.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:25 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3786362133' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:25.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:28:27.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:27 vm09.local ceph-mon[55914]: pgmap v451: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:27 vm03.local ceph-mon[47106]: pgmap v451: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:29.969 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:30.130 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:28:30.130 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 2m ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:30.130 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 12m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:28:30.130 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 115s ago 12m - - 2026-03-10T12:28:30.130 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 115s ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:30.130 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:29 vm03.local ceph-mon[47106]: pgmap v452: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:29 vm09.local ceph-mon[55914]: pgmap v452: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:30.324 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:30.324 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] 
CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:30.324 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:30 vm09.local ceph-mon[55914]: from='client.16376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:30 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2586286611' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:31.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:30 vm03.local ceph-mon[47106]: from='client.16376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:31.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:30 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2586286611' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:32.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:31 vm09.local ceph-mon[55914]: from='client.16380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:32.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:31 vm09.local ceph-mon[55914]: pgmap v453: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:32.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:31 vm03.local ceph-mon[47106]: from='client.16380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:32.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:31 vm03.local ceph-mon[47106]: pgmap v453: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:34.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:33 vm09.local ceph-mon[55914]: pgmap v454: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:34.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:33 vm03.local ceph-mon[47106]: pgmap v454: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:35.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:28:35.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:28:35.510 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:35.667 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:28:35.667 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 2m ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:35.667 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 12m 93.7M - 19.2.3-678-ge911bdeb 
654f31e6858e c04249fd4424 2026-03-10T12:28:35.667 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 12m - - 2026-03-10T12:28:35.667 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 2m ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:35.868 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:35.868 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:35.868 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:35 vm09.local ceph-mon[55914]: pgmap v455: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:28:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:28:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:35 vm09.local ceph-mon[55914]: pgmap v456: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:28:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:28:36.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:28:36.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:35 vm03.local ceph-mon[47106]: pgmap v455: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:36.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:28:36.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:28:36.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:35 vm03.local ceph-mon[47106]: pgmap v456: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:28:36.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:28:36.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:28:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:36 vm09.local 
ceph-mon[55914]: from='client.16388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:36 vm09.local ceph-mon[55914]: from='client.16392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:36 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1472669849' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:37.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:36 vm03.local ceph-mon[47106]: from='client.16388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:37.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:36 vm03.local ceph-mon[47106]: from='client.16392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:37.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:36 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1472669849' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:38.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:38 vm09.local ceph-mon[55914]: pgmap v457: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:28:38.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:38 vm03.local ceph-mon[47106]: pgmap v457: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:28:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:40 vm09.local ceph-mon[55914]: pgmap v458: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:28:40.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:28:40.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:40 vm03.local ceph-mon[47106]: pgmap v458: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:28:40.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:28:41.049 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:41.213 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:28:41.213 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 2m ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:41.213 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 12m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:28:41.213 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 12m - - 2026-03-10T12:28:41.213 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 
2m ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:41.405 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:41.405 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:41.405 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:42.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:42 vm09.local ceph-mon[55914]: pgmap v459: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:28:42.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:42 vm09.local ceph-mon[55914]: from='client.16400 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:42.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:42 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4065566596' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:42.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:42 vm03.local ceph-mon[47106]: pgmap v459: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:28:42.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:42 vm03.local ceph-mon[47106]: from='client.16400 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:42 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/4065566596' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:43 vm09.local ceph-mon[55914]: from='client.16404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:43.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:43 vm03.local ceph-mon[47106]: from='client.16404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:44.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:44 vm09.local ceph-mon[55914]: pgmap v460: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:28:44.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:44 vm03.local ceph-mon[47106]: pgmap v460: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:28:46.585 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:46.585 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:46 vm03.local ceph-mon[47106]: pgmap v461: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:28:46.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:46 vm09.local ceph-mon[55914]: pgmap v461: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:28:46.745 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:28:46.745 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 2m ago 12m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:46.745 
INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 12m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:28:46.745 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 12m - - 2026-03-10T12:28:46.745 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (12m) 2m ago 12m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:46.953 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:46.953 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:46.953 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:47.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:47 vm03.local ceph-mon[47106]: from='client.16412 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:47.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:47 vm03.local ceph-mon[47106]: from='client.16416 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:47.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:47 vm03.local ceph-mon[47106]: pgmap v462: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:47.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:47 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2868708193' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:47.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:47 vm09.local ceph-mon[55914]: from='client.16412 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:47.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:47 vm09.local ceph-mon[55914]: from='client.16416 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:47.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:47 vm09.local ceph-mon[55914]: pgmap v462: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:47.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:47 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/2868708193' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:50.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:49 vm09.local ceph-mon[55914]: pgmap v463: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:50.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:49 vm03.local ceph-mon[47106]: pgmap v463: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:52.169 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:52.375 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:28:52.375 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 2m ago 13m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:52.375 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:28:52.375 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 13m - - 2026-03-10T12:28:52.375 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (13m) 2m ago 13m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:52.375 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:51 vm03.local ceph-mon[47106]: pgmap v464: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:52.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:51 vm09.local ceph-mon[55914]: pgmap v464: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:52.582 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:52.583 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:52.583 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:53.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:52 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1946876720' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:53.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:52 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1946876720' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:53 vm09.local ceph-mon[55914]: from='client.16424 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:53 vm09.local ceph-mon[55914]: from='client.16428 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:54.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:53 vm09.local ceph-mon[55914]: pgmap v465: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:53 vm03.local ceph-mon[47106]: from='client.16424 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:53 vm03.local ceph-mon[47106]: from='client.16428 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:54.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:53 vm03.local ceph-mon[47106]: pgmap v465: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:55.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:28:55.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:28:56.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:56 vm09.local ceph-mon[55914]: pgmap v466: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:56.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:56 vm03.local ceph-mon[47106]: pgmap v466: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:28:57.813 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:28:57.988 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:28:57.988 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 2m ago 13m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:28:57.988 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:28:57.988 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 13m - - 2026-03-10T12:28:57.988 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (13m) 2m ago 13m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:28:58.161 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:58 vm03.local ceph-mon[47106]: pgmap v467: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB 
avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:58.212 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:28:58.213 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:28:58.213 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:28:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:58 vm09.local ceph-mon[55914]: pgmap v467: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:28:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:59 vm09.local ceph-mon[55914]: from='client.16436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:59 vm09.local ceph-mon[55914]: from='client.16440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:28:59 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1963529047' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:28:59.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:59 vm03.local ceph-mon[47106]: from='client.16436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:59.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:59 vm03.local ceph-mon[47106]: from='client.16440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:28:59.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:28:59 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1963529047' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:00.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:00 vm09.local ceph-mon[55914]: pgmap v468: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:00.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:00 vm03.local ceph-mon[47106]: pgmap v468: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:02 vm09.local ceph-mon[55914]: pgmap v469: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:02 vm03.local ceph-mon[47106]: pgmap v469: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:03.427 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:03.604 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:03.604 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 2m ago 13m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:03.604 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:29:03.604 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 13m - - 2026-03-10T12:29:03.605 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (13m) 2m ago 13m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:03.822 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:29:03.822 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:03.822 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:29:04.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:04 vm03.local ceph-mon[47106]: pgmap v470: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:04.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:04 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/726884993' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:04.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:04 vm09.local ceph-mon[55914]: pgmap v470: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:04.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:04 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/726884993' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:05.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:05 vm09.local ceph-mon[55914]: from='client.25533 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:05.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:05 vm09.local ceph-mon[55914]: from='client.25537 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:05.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:05 vm03.local ceph-mon[47106]: from='client.25533 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:05.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:05 vm03.local ceph-mon[47106]: from='client.25537 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:06.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:06 vm09.local ceph-mon[55914]: pgmap v471: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:06.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:06 vm03.local ceph-mon[47106]: pgmap v471: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:08.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:08 vm09.local ceph-mon[55914]: pgmap v472: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:08.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:08 vm03.local ceph-mon[47106]: pgmap v472: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:09.025 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:09.199 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:09.199 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (7m) 2m ago 13m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:09.199 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:29:09.199 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 13m - - 2026-03-10T12:29:09.199 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (13m) 2m ago 13m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:09.402 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:29:09.402 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:09.402 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:29:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:10 vm09.local ceph-mon[55914]: pgmap v473: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:10 vm09.local ceph-mon[55914]: 
from='client.16460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:10 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4163118097' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:29:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:10 vm03.local ceph-mon[47106]: pgmap v473: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:10 vm03.local ceph-mon[47106]: from='client.16460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:10 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/4163118097' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:10.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:29:11.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:11 vm09.local ceph-mon[55914]: from='client.16464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:11.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:11 vm03.local ceph-mon[47106]: from='client.16464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:12.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:12 vm09.local ceph-mon[55914]: pgmap v474: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:12.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:12 vm03.local ceph-mon[47106]: pgmap v474: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:14.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:14 vm09.local ceph-mon[55914]: pgmap v475: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:29:14.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:14 vm03.local ceph-mon[47106]: pgmap v475: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:29:14.591 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:14.750 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:14.750 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (8m) 2m ago 13m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:14.750 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (2m) 2m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:29:14.750 
INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 13m - - 2026-03-10T12:29:14.750 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (13m) 2m ago 13m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:14.949 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:29:14.949 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:14.949 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:29:15.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:15 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1940837815' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:15.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:15 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1940837815' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:16.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:16 vm09.local ceph-mon[55914]: from='client.16472 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:16.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:16 vm09.local ceph-mon[55914]: from='client.16476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:16.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:16 vm09.local ceph-mon[55914]: pgmap v476: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:16.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:16 vm03.local ceph-mon[47106]: from='client.16472 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:16.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:16 vm03.local ceph-mon[47106]: from='client.16476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:16.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:16 vm03.local ceph-mon[47106]: pgmap v476: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:18.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:18 vm09.local ceph-mon[55914]: pgmap v477: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:18.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:18 vm03.local ceph-mon[47106]: pgmap v477: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:20.138 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:20.314 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:20.314 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (8m) 3m ago 13m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:20.314 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 
2026-03-10T12:29:20.314 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 13m - - 2026-03-10T12:29:20.314 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (13m) 2m ago 13m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:20.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:20 vm09.local ceph-mon[55914]: pgmap v478: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:20.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:20 vm03.local ceph-mon[47106]: pgmap v478: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:20.513 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:29:20.513 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:20.513 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:29:21.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:21 vm09.local ceph-mon[55914]: from='client.16484 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:21.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:21 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4009868003' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:21.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:21 vm03.local ceph-mon[47106]: from='client.16484 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:21.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:21 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/4009868003' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:22.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:22 vm09.local ceph-mon[55914]: from='client.16488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:22.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:22 vm09.local ceph-mon[55914]: pgmap v479: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:22.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:22 vm03.local ceph-mon[47106]: from='client.16488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:22.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:22 vm03.local ceph-mon[47106]: pgmap v479: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:24.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:24 vm09.local ceph-mon[55914]: pgmap v480: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:24.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:24 vm03.local ceph-mon[47106]: pgmap v480: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:25.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:29:25.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:29:25.706 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:25.869 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:25.869 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (8m) 3m ago 13m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:25.869 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:29:25.869 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 13m - - 2026-03-10T12:29:25.869 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (13m) 2m ago 13m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:26.085 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:29:26.085 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:26.085 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:29:26.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:26 vm09.local ceph-mon[55914]: pgmap v481: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:26.391 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:26 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/229823480' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:26.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:26 vm03.local ceph-mon[47106]: pgmap v481: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:26.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:26 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/229823480' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:27.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:27 vm09.local ceph-mon[55914]: from='client.16496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:27.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:27 vm09.local ceph-mon[55914]: from='client.16500 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:27.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:27 vm03.local ceph-mon[47106]: from='client.16496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:27.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:27 vm03.local ceph-mon[47106]: from='client.16500 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:28.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:28 vm09.local ceph-mon[55914]: pgmap v482: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:28.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:28 vm03.local ceph-mon[47106]: pgmap v482: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:29.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:29 vm03.local ceph-mon[47106]: pgmap v483: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:29.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:29 vm09.local ceph-mon[55914]: pgmap v483: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:31.284 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:31.463 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:31.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (8m) 3m ago 13m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:31.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:29:31.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 2m ago 13m - - 2026-03-10T12:29:31.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (13m) 2m ago 13m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:31.687 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 
2026-03-10T12:29:31.687 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:31.687 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:29:32.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:31 vm03.local ceph-mon[47106]: pgmap v484: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:32.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:31 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/722359872' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:31 vm09.local ceph-mon[55914]: pgmap v484: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:32.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:31 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/722359872' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:33.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:32 vm03.local ceph-mon[47106]: from='client.16508 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:33.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:32 vm03.local ceph-mon[47106]: from='client.16512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:33.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:32 vm09.local ceph-mon[55914]: from='client.16508 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:33.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:32 vm09.local ceph-mon[55914]: from='client.16512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:34.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:33 vm09.local ceph-mon[55914]: pgmap v485: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:34.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:33 vm03.local ceph-mon[47106]: pgmap v485: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:35.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:34 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:29:35.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:34 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:29:36.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:35 vm09.local ceph-mon[55914]: pgmap v486: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:36.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:29:36.391 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:29:36.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:29:36.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:29:36.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:35 vm03.local ceph-mon[47106]: pgmap v486: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:36.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:29:36.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:29:36.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:29:36.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:29:36.884 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:37.057 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:37.057 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (8m) 3m ago 13m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:37.057 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:29:37.058 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 13m - - 2026-03-10T12:29:37.058 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (13m) 3m ago 13m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:37.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:36 vm03.local ceph-mon[47106]: pgmap v487: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:29:37.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:36 vm03.local ceph-mon[47106]: pgmap v488: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s 2026-03-10T12:29:37.264 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:29:37.264 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:37.264 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 
2026-03-10T12:29:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:36 vm09.local ceph-mon[55914]: pgmap v487: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:29:37.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:36 vm09.local ceph-mon[55914]: pgmap v488: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s 2026-03-10T12:29:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:38 vm09.local ceph-mon[55914]: from='client.16520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:38 vm09.local ceph-mon[55914]: from='client.16524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:38 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/4290767148' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:38 vm03.local ceph-mon[47106]: from='client.16520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:38 vm03.local ceph-mon[47106]: from='client.16524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:38 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/4290767148' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:39.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:39 vm09.local ceph-mon[55914]: pgmap v489: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s 2026-03-10T12:29:39.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:39 vm03.local ceph-mon[47106]: pgmap v489: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s 2026-03-10T12:29:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:40 vm09.local ceph-mon[55914]: pgmap v490: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:29:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:29:40.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:40 vm03.local ceph-mon[47106]: pgmap v490: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:29:40.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:29:42.460 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:42.631 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:42.631 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr 
vm03 *:8000 running (8m) 3m ago 13m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:42.631 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:29:42.631 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 13m - - 2026-03-10T12:29:42.631 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (13m) 3m ago 13m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:42.810 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:42 vm03.local ceph-mon[47106]: pgmap v491: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:29:42.851 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:29:42.852 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:42.852 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:29:42.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:42 vm09.local ceph-mon[55914]: pgmap v491: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:29:43.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:43 vm09.local ceph-mon[55914]: from='client.16532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:43.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:43 vm09.local ceph-mon[55914]: from='client.16536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:43.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:43 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2183016687' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:43.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:43 vm03.local ceph-mon[47106]: from='client.16532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:43.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:43 vm03.local ceph-mon[47106]: from='client.16536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:43.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:43 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/2183016687' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:44.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:44 vm09.local ceph-mon[55914]: pgmap v492: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:29:44.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:44 vm03.local ceph-mon[47106]: pgmap v492: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:29:46.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:46 vm09.local ceph-mon[55914]: pgmap v493: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-03-10T12:29:46.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:46 vm03.local ceph-mon[47106]: pgmap v493: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-03-10T12:29:48.055 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:48.213 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:48.214 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (8m) 3m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:48.214 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 13m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:29:48.214 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 13m - - 2026-03-10T12:29:48.214 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 3m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:48.422 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:29:48.422 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:48.422 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:29:48.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:48 vm03.local ceph-mon[47106]: pgmap v494: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:48.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:48 vm03.local ceph-mon[47106]: from='client.16544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:48.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:48 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/194090852' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:48.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:48 vm09.local ceph-mon[55914]: pgmap v494: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:48.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:48 vm09.local ceph-mon[55914]: from='client.16544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:48.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:48 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/194090852' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:49.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:49 vm09.local ceph-mon[55914]: from='client.16548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:49.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:49 vm03.local ceph-mon[47106]: from='client.16548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:50.811 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:50 vm03.local ceph-mon[47106]: pgmap v495: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:50.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:50 vm09.local ceph-mon[55914]: pgmap v495: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:52.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:52 vm09.local ceph-mon[55914]: pgmap v496: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:52.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:52 vm03.local ceph-mon[47106]: pgmap v496: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:53.607 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:53.771 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:53.771 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (8m) 3m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:53.771 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:29:53.771 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 14m - - 2026-03-10T12:29:53.771 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 3m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:53.973 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:29:53.973 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:53.973 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:29:54.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:54 vm09.local ceph-mon[55914]: pgmap v497: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:54.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:54 vm09.local ceph-mon[55914]: from='client.16556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:54.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:54 vm09.local ceph-mon[55914]: from='client.16560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:54.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:54 
vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3128449140' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:54.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:54 vm03.local ceph-mon[47106]: pgmap v497: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:54 vm03.local ceph-mon[47106]: from='client.16556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:54 vm03.local ceph-mon[47106]: from='client.16560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:29:54.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:54 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3128449140' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:29:55.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:29:55.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:29:56.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:56 vm09.local ceph-mon[55914]: pgmap v498: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:56.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:56 vm03.local ceph-mon[47106]: pgmap v498: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:29:58.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:29:58 vm09.local ceph-mon[55914]: pgmap v499: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:58.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:29:58 vm03.local ceph-mon[47106]: pgmap v499: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:29:59.160 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:29:59.336 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:29:59.336 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (8m) 3m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:29:59.336 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:29:59.336 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 14m - - 2026-03-10T12:29:59.336 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 3m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:29:59.549 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:29:59.550 
INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:29:59.550 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:00 vm09.local ceph-mon[55914]: from='client.16568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:00 vm09.local ceph-mon[55914]: pgmap v500: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:00 vm09.local ceph-mon[55914]: from='client.16572 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:00 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3397896559' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:00.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:00 vm09.local ceph-mon[55914]: overall HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:00 vm03.local ceph-mon[47106]: from='client.16568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:00 vm03.local ceph-mon[47106]: pgmap v500: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:00 vm03.local ceph-mon[47106]: from='client.16572 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:00 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3397896559' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:00.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:00 vm03.local ceph-mon[47106]: overall HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:02.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:02 vm09.local ceph-mon[55914]: pgmap v501: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:02.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:02 vm03.local ceph-mon[47106]: pgmap v501: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:04.733 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:30:04.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:04 vm09.local ceph-mon[55914]: pgmap v502: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:04.904 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:30:04.904 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (8m) 3m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:30:04.904 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:30:04.904 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 14m - - 2026-03-10T12:30:04.904 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 3m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:30:04.904 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:04 vm03.local ceph-mon[47106]: pgmap v502: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:05.106 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:05.106 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:30:05.106 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:05.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:05 vm09.local ceph-mon[55914]: from='client.16580 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:05.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:05 vm09.local ceph-mon[55914]: from='client.16584 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:05.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:05 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1759325272' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:05.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:05 vm03.local ceph-mon[47106]: from='client.16580 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:05.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:05 vm03.local ceph-mon[47106]: from='client.16584 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:05.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:05 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1759325272' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:06.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:06 vm09.local ceph-mon[55914]: pgmap v503: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:06.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:06 vm03.local ceph-mon[47106]: pgmap v503: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:08.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:08 vm09.local ceph-mon[55914]: pgmap v504: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:08.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:08 vm03.local ceph-mon[47106]: pgmap v504: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:10.296 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:30:10.463 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:30:10.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (8m) 3m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:30:10.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:30:10.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 14m - - 2026-03-10T12:30:10.463 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 3m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:30:10.611 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:10 vm03.local ceph-mon[47106]: pgmap v505: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:10.611 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:30:10.674 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:10.675 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:30:10.675 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:10 vm09.local 
ceph-mon[55914]: pgmap v505: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:10.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:30:11.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:11 vm09.local ceph-mon[55914]: from='client.16592 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:11.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:11 vm09.local ceph-mon[55914]: from='client.16596 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:11.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:11 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3436267143' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:11 vm03.local ceph-mon[47106]: from='client.16592 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:11 vm03.local ceph-mon[47106]: from='client.16596 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:11.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:11 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3436267143' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:12.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:12 vm09.local ceph-mon[55914]: pgmap v506: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:12.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:12 vm03.local ceph-mon[47106]: pgmap v506: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:14.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:14 vm03.local ceph-mon[47106]: pgmap v507: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:30:15.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:14 vm09.local ceph-mon[55914]: pgmap v507: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:30:15.867 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:30:16.045 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:30:16.045 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 3m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:30:16.045 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (3m) 3m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:30:16.045 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 14m - - 2026-03-10T12:30:16.045 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 3m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 
2026-03-10T12:30:16.263 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:16.263 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:30:16.263 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:16.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:16 vm03.local ceph-mon[47106]: pgmap v508: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:16.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:16 vm03.local ceph-mon[47106]: from='client.16604 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:16.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:16 vm03.local ceph-mon[47106]: from='client.16608 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:16.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:16 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3223530810' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:17.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:16 vm09.local ceph-mon[55914]: pgmap v508: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:17.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:16 vm09.local ceph-mon[55914]: from='client.16604 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:17.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:16 vm09.local ceph-mon[55914]: from='client.16608 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:17.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:16 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/3223530810' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:19.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:18 vm09.local ceph-mon[55914]: pgmap v509: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:19.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:18 vm03.local ceph-mon[47106]: pgmap v509: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:21.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:20 vm09.local ceph-mon[55914]: pgmap v510: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:21.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:20 vm03.local ceph-mon[47106]: pgmap v510: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:21.464 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:30:21.629 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:30:21.629 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 4m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:30:21.629 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 4m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:30:21.629 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 14m - - 2026-03-10T12:30:21.629 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 3m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:30:21.843 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:21.843 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:30:21.843 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:23.339 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:22 vm03.local ceph-mon[47106]: pgmap v511: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:23.339 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:22 vm03.local ceph-mon[47106]: from='client.16616 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:23.339 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:22 vm03.local ceph-mon[47106]: from='client.16620 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:23.339 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:22 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1827576597' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:22 vm09.local ceph-mon[55914]: pgmap v511: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:22 vm09.local ceph-mon[55914]: from='client.16616 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:22 vm09.local ceph-mon[55914]: from='client.16620 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:23.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:22 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1827576597' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:25.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:25 vm09.local ceph-mon[55914]: pgmap v512: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:25.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:25 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:30:25.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:25 vm03.local ceph-mon[47106]: pgmap v512: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:25 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:30:26.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:26 vm09.local ceph-mon[55914]: pgmap v513: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:26.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:26 vm03.local ceph-mon[47106]: pgmap v513: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:27.042 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:30:27.199 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:30:27.199 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 4m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:30:27.199 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 4m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:30:27.199 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 14m - - 2026-03-10T12:30:27.199 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 3m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:30:27.414 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:27.414 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] 
CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:30:27.414 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:27.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:27 vm09.local ceph-mon[55914]: from='client.16628 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:27.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:27 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/963191398' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:27 vm03.local ceph-mon[47106]: from='client.16628 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:27.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:27 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/963191398' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:28.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:28 vm03.local ceph-mon[47106]: pgmap v514: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:28.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:28 vm03.local ceph-mon[47106]: from='client.16632 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:28.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:28 vm09.local ceph-mon[55914]: pgmap v514: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:28.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:28 vm09.local ceph-mon[55914]: from='client.16632 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:30.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:30 vm09.local ceph-mon[55914]: pgmap v515: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:30.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:30 vm03.local ceph-mon[47106]: pgmap v515: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:32.618 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:30:32.795 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:30:32.795 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 4m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:30:32.795 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 4m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:30:32.795 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 3m ago 14m - - 2026-03-10T12:30:32.795 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 3m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:30:32.796 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:32 vm03.local ceph-mon[47106]: pgmap v516: 129 pgs: 129 active+clean; 454 KiB data, 247 
MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:32.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:32 vm09.local ceph-mon[55914]: pgmap v516: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:32.995 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:32.995 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:30:32.995 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:33.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:33 vm09.local ceph-mon[55914]: from='client.16640 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:33.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:33 vm09.local ceph-mon[55914]: from='client.16644 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:33.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:33 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/524434508' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:33.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:33 vm03.local ceph-mon[47106]: from='client.16640 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:33.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:33 vm03.local ceph-mon[47106]: from='client.16644 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:33.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:33 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/524434508' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:34.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:34 vm09.local ceph-mon[55914]: pgmap v517: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:34.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:34 vm03.local ceph-mon[47106]: pgmap v517: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:35.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:30:35.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:30:35.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:30:35.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:30:35.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:35 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:30:35.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:30:35.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:30:35.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:30:35.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:30:35.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:35 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:30:36.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:36 vm09.local ceph-mon[55914]: pgmap v518: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:36.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:36 vm09.local ceph-mon[55914]: pgmap v519: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:30:36.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:36 vm09.local ceph-mon[55914]: pgmap v520: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 
160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s 2026-03-10T12:30:36.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:36 vm03.local ceph-mon[47106]: pgmap v518: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:36.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:36 vm03.local ceph-mon[47106]: pgmap v519: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s 2026-03-10T12:30:36.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:36 vm03.local ceph-mon[47106]: pgmap v520: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s 2026-03-10T12:30:38.185 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:30:38.351 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:30:38.351 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 4m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:30:38.351 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 4m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:30:38.351 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 4m ago 14m - - 2026-03-10T12:30:38.351 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 4m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:30:38.566 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:38.566 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:30:38.566 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:38.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:38 vm09.local ceph-mon[55914]: pgmap v521: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s 2026-03-10T12:30:38.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:38 vm03.local ceph-mon[47106]: pgmap v521: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s 2026-03-10T12:30:39.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:39 vm09.local ceph-mon[55914]: from='client.16652 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:39.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:39 vm09.local ceph-mon[55914]: from='client.16656 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:39.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:39 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1173936016' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:39.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:39 vm03.local ceph-mon[47106]: from='client.16652 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:39.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:39 vm03.local ceph-mon[47106]: from='client.16656 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:39.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:39 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1173936016' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:40 vm09.local ceph-mon[55914]: pgmap v522: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:30:40.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:30:40.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:40 vm03.local ceph-mon[47106]: pgmap v522: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:30:40.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:30:42.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:42 vm09.local ceph-mon[55914]: pgmap v523: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:30:42.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:42 vm03.local ceph-mon[47106]: pgmap v523: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:30:43.776 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:30:43.943 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:30:43.943 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 4m ago 14m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:30:43.943 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 4m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:30:43.943 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 4m ago 14m - - 2026-03-10T12:30:43.943 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (14m) 4m ago 14m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:30:44.150 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:44.150 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:30:44.150 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:44.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:44 vm09.local ceph-mon[55914]: pgmap v524: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 
2026-03-10T12:30:44.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:44 vm09.local ceph-mon[55914]: from='client.25683 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:44.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:44 vm09.local ceph-mon[55914]: from='client.16668 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:44.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:44 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1411143107' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:44.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:44 vm03.local ceph-mon[47106]: pgmap v524: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:30:44.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:44 vm03.local ceph-mon[47106]: from='client.25683 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:44.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:44 vm03.local ceph-mon[47106]: from='client.16668 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:44.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:44 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1411143107' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:46.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:46 vm09.local ceph-mon[55914]: pgmap v525: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-03-10T12:30:46.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:46 vm03.local ceph-mon[47106]: pgmap v525: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-03-10T12:30:48.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:48 vm09.local ceph-mon[55914]: pgmap v526: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:48.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:48 vm03.local ceph-mon[47106]: pgmap v526: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:49.345 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:30:49.512 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:30:49.512 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 4m ago 15m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:30:49.512 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 4m ago 14m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:30:49.512 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 4m ago 15m - - 2026-03-10T12:30:49.512 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (15m) 4m ago 15m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:30:49.715 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:49.715 
INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:30:49.715 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:51.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:50 vm09.local ceph-mon[55914]: from='client.16676 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:51.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:50 vm09.local ceph-mon[55914]: pgmap v527: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:51.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:50 vm09.local ceph-mon[55914]: from='client.16680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:51.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:50 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2511401443' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:51.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:50 vm03.local ceph-mon[47106]: from='client.16676 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:51.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:50 vm03.local ceph-mon[47106]: pgmap v527: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:51.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:50 vm03.local ceph-mon[47106]: from='client.16680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:51.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:50 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/2511401443' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:53.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:52 vm09.local ceph-mon[55914]: pgmap v528: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:53.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:52 vm03.local ceph-mon[47106]: pgmap v528: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:54.895 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:30:55.052 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:30:55.052 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 4m ago 15m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:30:55.052 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 4m ago 15m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:30:55.052 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 4m ago 15m - - 2026-03-10T12:30:55.052 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (15m) 4m ago 15m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:30:55.053 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:54 vm03.local ceph-mon[47106]: pgmap v529: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:54 vm09.local ceph-mon[55914]: pgmap v529: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:55.261 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:30:55.261 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:30:55.261 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:30:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:55 vm09.local ceph-mon[55914]: from='client.16688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:30:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:55 vm09.local ceph-mon[55914]: from='client.16692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:55 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/27830293' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:56.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:55 vm03.local ceph-mon[47106]: from='client.16688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:56.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:30:56.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:55 vm03.local ceph-mon[47106]: from='client.16692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:30:56.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:55 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/27830293' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:30:57.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:56 vm09.local ceph-mon[55914]: pgmap v530: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:57.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:56 vm03.local ceph-mon[47106]: pgmap v530: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:30:59.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:30:58 vm09.local ceph-mon[55914]: pgmap v531: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:30:59.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:30:58 vm03.local ceph-mon[47106]: pgmap v531: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:00.470 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:31:00.653 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:31:00.653 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 4m ago 15m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:31:00.653 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 4m ago 15m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:31:00.653 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 4m ago 15m - - 2026-03-10T12:31:00.653 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (15m) 4m ago 15m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:31:00.872 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:00 vm03.local ceph-mon[47106]: pgmap v532: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:00.872 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:31:00.872 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:31:00.872 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:31:01.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:00 vm09.local 
ceph-mon[55914]: pgmap v532: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:02.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:01 vm09.local ceph-mon[55914]: from='client.16700 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:02.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:01 vm09.local ceph-mon[55914]: from='client.16704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:02.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:01 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/2866139878' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:02.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:01 vm03.local ceph-mon[47106]: from='client.16700 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:02.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:01 vm03.local ceph-mon[47106]: from='client.16704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:02.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:01 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/2866139878' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:03.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:02 vm09.local ceph-mon[55914]: pgmap v533: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:31:03.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:02 vm03.local ceph-mon[47106]: pgmap v533: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:31:05.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:04 vm09.local ceph-mon[55914]: pgmap v534: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:05.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:04 vm03.local ceph-mon[47106]: pgmap v534: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:06.053 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:31:06.217 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:31:06.218 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 4m ago 15m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:31:06.218 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 4m ago 15m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:31:06.218 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 4m ago 15m - - 2026-03-10T12:31:06.218 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (15m) 4m ago 15m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:31:06.461 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:31:06.462 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] 
CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:31:06.462 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:31:07.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:06 vm09.local ceph-mon[55914]: pgmap v535: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:07.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:06 vm09.local ceph-mon[55914]: from='client.16712 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:07.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:06 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3387960731' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:07.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:06 vm03.local ceph-mon[47106]: pgmap v535: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:06 vm03.local ceph-mon[47106]: from='client.16712 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:06 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/3387960731' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:08.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:07 vm09.local ceph-mon[55914]: from='client.16716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:08.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:07 vm03.local ceph-mon[47106]: from='client.16716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:09.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:08 vm09.local ceph-mon[55914]: pgmap v536: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:09.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:08 vm03.local ceph-mon[47106]: pgmap v536: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:11.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:10 vm09.local ceph-mon[55914]: pgmap v537: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:11.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:31:11.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:10 vm03.local ceph-mon[47106]: pgmap v537: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:11.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:31:11.657 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 
2026-03-10T12:31:11.818 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:31:11.818 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (9m) 4m ago 15m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:31:11.818 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (4m) 4m ago 15m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:31:11.818 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 4m ago 15m - - 2026-03-10T12:31:11.818 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (15m) 4m ago 15m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:31:12.010 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:31:12.010 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:31:12.010 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:31:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:12 vm09.local ceph-mon[55914]: pgmap v538: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:12 vm09.local ceph-mon[55914]: from='client.16724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:12 vm09.local ceph-mon[55914]: from='client.16728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:13.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:12 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3204122363' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:13.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:12 vm03.local ceph-mon[47106]: pgmap v538: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:31:13.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:12 vm03.local ceph-mon[47106]: from='client.16724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:13.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:12 vm03.local ceph-mon[47106]: from='client.16728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:13.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:12 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3204122363' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:15.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:14 vm09.local ceph-mon[55914]: pgmap v539: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:31:15.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:14 vm03.local ceph-mon[47106]: pgmap v539: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:31:17.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:16 vm09.local ceph-mon[55914]: pgmap v540: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 170 B/s wr, 36 op/s 2026-03-10T12:31:17.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:16 vm03.local ceph-mon[47106]: pgmap v540: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 170 B/s wr, 36 op/s 2026-03-10T12:31:17.201 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to stop 2026-03-10T12:31:17.369 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:31:17.369 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (10m) 4m ago 15m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:31:17.369 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 4m ago 15m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:31:17.369 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 4m ago 15m - - 2026-03-10T12:31:17.369 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (15m) 4m ago 15m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:31:17.581 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:31:17.582 INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:31:17.582 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:31:18.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:17 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3347290763' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:18.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:17 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3347290763' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:19.133 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:18 vm03.local ceph-mon[47106]: from='client.16736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:19.133 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:18 vm03.local ceph-mon[47106]: from='client.16740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:19.133 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:18 vm03.local ceph-mon[47106]: pgmap v541: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s rd, 170 B/s wr, 52 op/s 2026-03-10T12:31:19.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:18 vm09.local ceph-mon[55914]: from='client.16736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:19.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:18 vm09.local ceph-mon[55914]: from='client.16740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:19.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:18 vm09.local ceph-mon[55914]: pgmap v541: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s rd, 170 B/s wr, 52 op/s 2026-03-10T12:31:21.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:20 vm09.local ceph-mon[55914]: pgmap v542: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-03-10T12:31:21.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:20 vm03.local ceph-mon[47106]: pgmap v542: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-03-10T12:31:22.664 INFO:teuthology.orchestra.run.vm03.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T12:31:22.664 INFO:teuthology.orchestra.run.vm03.stderr: Dload Upload Total Spent Left Speed 2026-03-10T12:31:22.665 INFO:teuthology.orchestra.run.vm03.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-03-10T12:31:22.842 INFO:teuthology.orchestra.run.vm03.stdout:anonymousScheduled to start rgw.foo.vm09.jddmdl on host 'vm09' 2026-03-10T12:31:23.049 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.jddmdl to start 2026-03-10T12:31:23.049 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:22 vm03.local ceph-mon[47106]: pgmap v543: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-03-10T12:31:23.049 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:22 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:23.049 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:22 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:23.049 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:22 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:23.049 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:22 vm03.local 
ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:23.049 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:22 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:23.049 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:22 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:23.049 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:22 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:22 vm09.local ceph-mon[55914]: pgmap v543: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-03-10T12:31:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:22 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:22 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:22 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:22 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:22 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:22 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:23.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:22 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:23.220 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:31:23.221 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (10m) 5m ago 15m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:31:23.221 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 5m ago 15m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:31:23.221 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 error 4m ago 15m - - 2026-03-10T12:31:23.221 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (15m) 4m ago 15m 118M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:31:23.435 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T12:31:23.435 
INFO:teuthology.orchestra.run.vm03.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T12:31:23.435 INFO:teuthology.orchestra.run.vm03.stdout: daemon rgw.foo.vm09.jddmdl on vm09 is in error state 2026-03-10T12:31:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:23 vm09.local ceph-mon[55914]: from='client.16752 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm09.jddmdl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:23 vm09.local ceph-mon[55914]: Schedule start daemon rgw.foo.vm09.jddmdl 2026-03-10T12:31:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:23 vm09.local ceph-mon[55914]: pgmap v544: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 361 B/s wr, 63 op/s 2026-03-10T12:31:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:23 vm09.local ceph-mon[55914]: from='client.16756 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:23 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3104128189' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:23 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:23 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:23.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:23 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:24.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:23 vm03.local ceph-mon[47106]: from='client.16752 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm09.jddmdl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:24.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:23 vm03.local ceph-mon[47106]: Schedule start daemon rgw.foo.vm09.jddmdl 2026-03-10T12:31:24.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:23 vm03.local ceph-mon[47106]: pgmap v544: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 361 B/s wr, 63 op/s 2026-03-10T12:31:24.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:23 vm03.local ceph-mon[47106]: from='client.16756 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:24.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:23 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3104128189' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:24.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:23 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:24.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:23 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:24.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:23 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:25.437 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:25 vm09.local ceph-mon[55914]: from='client.25739 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:25.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:25 vm03.local ceph-mon[47106]: from='client.25739 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:26 vm09.local ceph-mon[55914]: pgmap v545: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 361 B/s wr, 110 op/s 2026-03-10T12:31:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:31:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:26 vm09.local ceph-mon[55914]: pgmap v546: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 51 KiB/s rd, 205 B/s wr, 82 op/s 2026-03-10T12:31:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:26.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:26 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:26 vm03.local ceph-mon[47106]: pgmap v545: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 361 B/s wr, 110 op/s 2026-03-10T12:31:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:26 vm03.local ceph-mon[47106]: 
from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:31:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:26 vm03.local ceph-mon[47106]: pgmap v546: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 51 KiB/s rd, 205 B/s wr, 82 op/s 2026-03-10T12:31:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:26.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:26 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:27.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:27 vm09.local ceph-mon[55914]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T12:31:27.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:27 vm09.local ceph-mon[55914]: Cluster is now healthy 2026-03-10T12:31:27.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:27 vm03.local ceph-mon[47106]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-10T12:31:27.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:27 vm03.local ceph-mon[47106]: Cluster is now healthy 2026-03-10T12:31:28.623 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5s) 3s ago 15m 95.8M - 19.2.3-678-ge911bdeb 654f31e6858e 46cdccfbbfd3 2026-03-10T12:31:28.624 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:28 vm03.local ceph-mon[47106]: pgmap v547: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 205 B/s wr, 92 op/s 2026-03-10T12:31:28.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:28 vm09.local ceph-mon[55914]: pgmap v547: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 205 B/s wr, 92 op/s 2026-03-10T12:31:28.802 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled to stop rgw.foo.vm09.wcqnzb on host 'vm09' 2026-03-10T12:31:29.002 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.wcqnzb to stop 2026-03-10T12:31:29.173 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:31:29.173 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (10m) 5m ago 15m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:31:29.173 
INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 5m ago 15m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:31:29.173 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (5s) 3s ago 15m 95.8M - 19.2.3-678-ge911bdeb 654f31e6858e 46cdccfbbfd3 2026-03-10T12:31:29.173 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (15m) 3s ago 15m 120M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:31:29.387 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='client.16774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='client.16778 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm09.wcqnzb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: Schedule stop daemon rgw.foo.vm09.wcqnzb 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='client.16782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:30.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:29 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/3050361968' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='client.16774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='client.16778 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm09.wcqnzb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: Schedule stop daemon rgw.foo.vm09.wcqnzb 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='client.16782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:30.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:29 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3050361968' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:30 vm09.local ceph-mon[55914]: from='client.16786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:31.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:30 vm09.local ceph-mon[55914]: pgmap v548: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 64 KiB/s rd, 205 B/s wr, 99 op/s 2026-03-10T12:31:31.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:30 vm03.local ceph-mon[47106]: from='client.16786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:31.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:30 vm03.local ceph-mon[47106]: pgmap v548: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 64 KiB/s rd, 205 B/s wr, 99 op/s 2026-03-10T12:31:32.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:32 vm09.local ceph-mon[55914]: pgmap v549: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 64 KiB/s rd, 410 B/s wr, 99 op/s 2026-03-10T12:31:32.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:32 vm03.local ceph-mon[47106]: pgmap v549: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 64 KiB/s rd, 410 B/s wr, 99 op/s 2026-03-10T12:31:34.579 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.wcqnzb to stop 2026-03-10T12:31:34.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:34 vm03.local ceph-mon[47106]: pgmap v550: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 60 KiB/s rd, 385 B/s wr, 93 op/s 2026-03-10T12:31:34.738 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:31:34.738 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (10m) 5m ago 15m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:31:34.738 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 5m ago 15m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:31:34.738 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (11s) 9s ago 15m 95.8M - 19.2.3-678-ge911bdeb 654f31e6858e 46cdccfbbfd3 2026-03-10T12:31:34.738 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (15m) 9s ago 15m 120M - 19.2.3-678-ge911bdeb 654f31e6858e 49c1a8b29c06 2026-03-10T12:31:34.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:34 vm09.local ceph-mon[55914]: pgmap v550: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 60 KiB/s rd, 385 B/s wr, 93 op/s 2026-03-10T12:31:34.934 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:31:35.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:35 vm09.local ceph-mon[55914]: from='client.25763 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:35.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:35 vm09.local ceph-mon[55914]: from='client.16798 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:35.891 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:35 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/1031899836' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:35.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:35 vm03.local ceph-mon[47106]: from='client.25763 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:35.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:35 vm03.local ceph-mon[47106]: from='client.16798 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:35.910 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:35 vm03.local ceph-mon[47106]: from='client.? 192.168.123.103:0/1031899836' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:37.124 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:36 vm09.local ceph-mon[55914]: pgmap v551: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 385 B/s wr, 42 op/s 2026-03-10T12:31:37.124 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:36 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:37.124 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:36 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:37.124 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:36 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:37.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:36 vm03.local ceph-mon[47106]: pgmap v551: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 385 B/s wr, 42 op/s 2026-03-10T12:31:37.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:36 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:37.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:36 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:37.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:36 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:38 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:38 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:38 vm09.local ceph-mon[55914]: pgmap v552: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 341 B/s wr, 37 op/s 2026-03-10T12:31:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:38 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:38 vm09.local ceph-mon[55914]: from='mgr.14217 
192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:38 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:38.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:38 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:38 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:38 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:38 vm03.local ceph-mon[47106]: pgmap v552: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 341 B/s wr, 37 op/s 2026-03-10T12:31:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:38 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:38 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:38 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:38.660 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:38 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:40.136 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 stopped 2s ago 15m - - 2026-03-10T12:31:40.145 INFO:teuthology.orchestra.run.vm03.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T12:31:40.148 INFO:teuthology.orchestra.run.vm03.stderr: Dload Upload Total Spent Left Speed 2026-03-10T12:31:40.150 INFO:teuthology.orchestra.run.vm03.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 23375 0 --:--:-- --:--:-- --:--:-- 23375 2026-03-10T12:31:40.353 INFO:teuthology.orchestra.run.vm03.stdout:anonymousScheduled to start rgw.foo.vm09.wcqnzb on host 'vm09' 2026-03-10T12:31:40.550 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for rgw.foo.vm09.wcqnzb to start 2026-03-10T12:31:40.720 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:31:40.720 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (10m) 5m ago 15m 124M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:31:40.720 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 5m ago 15m 93.7M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:31:40.720 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (17s) 3s ago 15m 96.9M - 19.2.3-678-ge911bdeb 654f31e6858e 46cdccfbbfd3 
2026-03-10T12:31:40.720 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 stopped 3s ago 15m - - 2026-03-10T12:31:40.925 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: pgmap v553: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 8.7 KiB/s rd, 341 B/s wr, 13 op/s 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='client.16806 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:41.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:40 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/1908912412' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:41.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: pgmap v553: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 8.7 KiB/s rd, 341 B/s wr, 13 op/s 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='client.16806 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:41.411 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:40 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/1908912412' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:42.251 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:41 vm09.local ceph-mon[55914]: from='client.16810 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm09.wcqnzb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:42.251 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:41 vm09.local ceph-mon[55914]: Schedule start daemon rgw.foo.vm09.wcqnzb 2026-03-10T12:31:42.251 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:41 vm09.local ceph-mon[55914]: from='client.16814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:42.251 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:41 vm09.local ceph-mon[55914]: from='client.16818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:42.251 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:41 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:42.251 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:41 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:42.251 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:41 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:41 vm03.local ceph-mon[47106]: from='client.16810 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm09.wcqnzb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:41 vm03.local ceph-mon[47106]: Schedule start daemon rgw.foo.vm09.wcqnzb 2026-03-10T12:31:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:41 vm03.local ceph-mon[47106]: from='client.16814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:41 vm03.local ceph-mon[47106]: from='client.16818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:41 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:41 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:42.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:41 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:42 vm09.local ceph-mon[55914]: pgmap v554: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:31:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:42 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:43.391 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:42 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:42 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:42 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:42 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:43.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:42 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:43.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:42 vm03.local ceph-mon[47106]: pgmap v554: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:31:43.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:42 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:43.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:42 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:43.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:42 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:43.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:42 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:43.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:42 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:43.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:42 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:45.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:44 vm09.local ceph-mon[55914]: pgmap v555: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 6.2 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-10T12:31:45.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:44 vm03.local ceph-mon[47106]: pgmap v555: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 6.2 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-10T12:31:46.107 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (5s) 3s ago 15m 56.6M - 19.2.3-678-ge911bdeb 654f31e6858e 3bfce1c83f3d 2026-03-10T12:31:46.107 INFO:teuthology.orchestra.run.vm03.stdout:Check with each haproxy down in turn... 
2026-03-10T12:31:46.451 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled to stop haproxy.rgw.foo.vm03.oeugxe on host 'vm03' 2026-03-10T12:31:46.664 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for haproxy.rgw.foo.vm03.oeugxe to stop 2026-03-10T12:31:46.836 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:31:46.837 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm03.oeugxe vm03 *:9000,9001 running (15m) 5m ago 15m 3846k - 2.3.17-d1c9119 e85424b0d443 a30374aad12b 2026-03-10T12:31:46.837 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm09.mpsxsc vm09 *:9000,9001 running (15m) 4s ago 15m 7084k - 2.3.17-d1c9119 e85424b0d443 53e927129207 2026-03-10T12:31:47.075 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: pgmap v556: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 43 KiB/s rd, 170 B/s wr, 67 op/s 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: from='client.16836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:47.292 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:47 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: pgmap v556: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 43 KiB/s rd, 170 B/s wr, 67 op/s 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: from='client.16836 -' 
entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:47.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:47 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:48.096 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:48 vm03.local ceph-mon[47106]: from='client.16840 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:48.096 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:48 vm03.local ceph-mon[47106]: from='client.16844 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm03.oeugxe", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:48.096 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:48 vm03.local ceph-mon[47106]: Schedule stop daemon haproxy.rgw.foo.vm03.oeugxe 2026-03-10T12:31:48.096 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:48 vm03.local ceph-mon[47106]: from='client.16848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:48.096 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:48 vm03.local ceph-mon[47106]: from='client.25815 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:48.096 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:48 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:48.096 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:48 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3955061851' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:48.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:48 vm09.local ceph-mon[55914]: from='client.16840 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:48.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:48 vm09.local ceph-mon[55914]: from='client.16844 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm03.oeugxe", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:48.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:48 vm09.local ceph-mon[55914]: Schedule stop daemon haproxy.rgw.foo.vm03.oeugxe 2026-03-10T12:31:48.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:48 vm09.local ceph-mon[55914]: from='client.16848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:48.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:48 vm09.local ceph-mon[55914]: from='client.25815 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:48.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:48 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:48.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:48 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3955061851' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:49 vm09.local ceph-mon[55914]: pgmap v557: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 170 B/s wr, 82 op/s 2026-03-10T12:31:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:49.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:49 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:49.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:49 vm03.local ceph-mon[47106]: pgmap v557: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 170 B/s wr, 82 op/s 2026-03-10T12:31:49.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:49 
vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:49.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:49.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:49.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:49.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:49.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:49 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:50.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:50 vm09.local ceph-mon[55914]: pgmap v558: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 170 B/s wr, 82 op/s 2026-03-10T12:31:50.659 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:50 vm03.local ceph-mon[47106]: pgmap v558: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 170 B/s wr, 82 op/s 2026-03-10T12:31:52.261 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm03.oeugxe vm03 *:9000,9001 stopped 3s ago 15m - - 2026-03-10T12:31:52.266 INFO:teuthology.orchestra.run.vm03.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T12:31:52.266 INFO:teuthology.orchestra.run.vm03.stderr: Dload Upload Total Spent Left Speed 2026-03-10T12:31:52.267 INFO:teuthology.orchestra.run.vm03.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 2026-03-10T12:31:52.267 INFO:teuthology.orchestra.run.vm03.stderr:curl: (7) Failed to connect to 12.12.1.103 port 9000: Connection refused 2026-03-10T12:31:52.267 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for http://12.12.1.103:9000/ to be available 2026-03-10T12:31:52.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:52 vm09.local ceph-mon[55914]: pgmap v559: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 341 B/s wr, 82 op/s 2026-03-10T12:31:52.909 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:52 vm03.local ceph-mon[47106]: pgmap v559: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 341 B/s wr, 82 op/s 2026-03-10T12:31:53.272 INFO:teuthology.orchestra.run.vm03.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T12:31:53.272 INFO:teuthology.orchestra.run.vm03.stderr: Dload Upload Total Spent Left Speed 2026-03-10T12:31:53.272 INFO:teuthology.orchestra.run.vm03.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 2026-03-10T12:31:53.272 INFO:teuthology.orchestra.run.vm03.stderr:curl: (7) Failed to connect to 12.12.1.103 port 9000: Connection refused 2026-03-10T12:31:53.272 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for 
http://12.12.1.103:9000/ to be available 2026-03-10T12:31:54.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:53 vm09.local ceph-mon[55914]: from='client.16858 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:54.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:53 vm03.local ceph-mon[47106]: from='client.16858 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:54.277 INFO:teuthology.orchestra.run.vm03.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T12:31:54.277 INFO:teuthology.orchestra.run.vm03.stderr: Dload Upload Total Spent Left Speed 2026-03-10T12:31:54.278 INFO:teuthology.orchestra.run.vm03.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-03-10T12:31:54.469 INFO:teuthology.orchestra.run.vm03.stdout:anonymousScheduled to start haproxy.rgw.foo.vm03.oeugxe on host 'vm03' 2026-03-10T12:31:54.702 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for haproxy.rgw.foo.vm03.oeugxe to start 2026-03-10T12:31:54.824 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:54 vm03.local ceph-mon[47106]: pgmap v560: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 341 B/s wr, 82 op/s 2026-03-10T12:31:54.824 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:54.824 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:54.824 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:54.824 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:54.824 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:54.824 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:54.824 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:54 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:54.908 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:31:54.908 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm03.oeugxe vm03 *:9000,9001 stopped 6s ago 15m - - 2026-03-10T12:31:54.908 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm09.mpsxsc vm09 *:9000,9001 running (16m) 12s ago 16m 7084k - 2.3.17-d1c9119 e85424b0d443 53e927129207 2026-03-10T12:31:55.138 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:31:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:54 vm09.local 
ceph-mon[55914]: pgmap v560: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 341 B/s wr, 82 op/s 2026-03-10T12:31:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:55.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:54 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:56.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:55 vm03.local ceph-mon[47106]: from='client.16862 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm03.oeugxe", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:56.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:55 vm03.local ceph-mon[47106]: Schedule start daemon haproxy.rgw.foo.vm03.oeugxe 2026-03-10T12:31:56.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:55 vm03.local ceph-mon[47106]: from='client.25827 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:56.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:55 vm03.local ceph-mon[47106]: from='client.25829 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:56.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:31:56.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:55 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/478670387' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:56.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:56.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:56.018 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:55 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:55 vm09.local ceph-mon[55914]: from='client.16862 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm03.oeugxe", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:55 vm09.local ceph-mon[55914]: Schedule start daemon haproxy.rgw.foo.vm03.oeugxe 2026-03-10T12:31:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:55 vm09.local ceph-mon[55914]: from='client.25827 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:55 vm09.local ceph-mon[55914]: from='client.25829 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:31:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:31:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:55 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/478670387' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:31:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:56.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:55 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:31:56.881 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:56 vm03.local ceph-mon[47106]: pgmap v561: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 47 KiB/s rd, 511 B/s wr, 73 op/s 2026-03-10T12:31:57.141 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:56 vm09.local ceph-mon[55914]: pgmap v561: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 47 KiB/s rd, 511 B/s wr, 73 op/s 2026-03-10T12:31:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:58 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:58 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:58 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:58 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:58 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:58.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:58 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:58.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:58 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:58.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:58 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:58.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:58 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:31:58.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:58 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:31:58.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:58 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:31:58.410 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:58 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:31:59.159 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:31:59 vm03.local ceph-mon[47106]: pgmap v562: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 9.9 KiB/s rd, 341 B/s wr, 15 op/s 2026-03-10T12:31:59.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:31:59 vm09.local ceph-mon[55914]: pgmap v562: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 9.9 KiB/s rd, 341 B/s wr, 15 op/s 2026-03-10T12:32:00.320 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm03.oeugxe vm03 *:9000,9001 running (5s) 3s ago 16m 3636k - 2.3.17-d1c9119 e85424b0d443 cb975e8f0326 2026-03-10T12:32:00.472 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled to stop haproxy.rgw.foo.vm09.mpsxsc on host 'vm09' 2026-03-10T12:32:00.651 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for haproxy.rgw.foo.vm09.mpsxsc to stop 2026-03-10T12:32:00.805 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:32:00.805 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm03.oeugxe vm03 *:9000,9001 running (5s) 3s ago 16m 3636k - 2.3.17-d1c9119 e85424b0d443 cb975e8f0326 2026-03-10T12:32:00.805 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm09.mpsxsc vm09 *:9000,9001 running (16m) 18s ago 16m 7084k - 2.3.17-d1c9119 e85424b0d443 53e927129207 2026-03-10T12:32:01.001 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:32:01.377 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: pgmap v563: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:32:01.377 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:01.377 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:01.377 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:32:01.377 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:32:01.377 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:32:01.378 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:01.378 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:32:01.378 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:01.378 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:01.378 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:32:01.378 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:01 vm09.local ceph-mon[55914]: from='client.? 192.168.123.103:0/3742846054' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: pgmap v563: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:32:01.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:01 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/3742846054' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:32:02.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: from='client.16876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: from='client.16880 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm09.mpsxsc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: Schedule stop daemon haproxy.rgw.foo.vm09.mpsxsc 2026-03-10T12:32:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: from='client.16884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: from='client.16888 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:32:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:32:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:02.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:02 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:32:02.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: from='client.16876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:02.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: from='client.16880 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm09.mpsxsc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: Schedule stop daemon haproxy.rgw.foo.vm09.mpsxsc 2026-03-10T12:32:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: from='client.16884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: from='client.16888 -' entity='client.admin' 
cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:32:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:32:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:02.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:02 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:32:03.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:03 vm09.local ceph-mon[55914]: pgmap v564: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:32:03.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:03 vm03.local ceph-mon[47106]: pgmap v564: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:32:05.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:05 vm09.local ceph-mon[55914]: pgmap v565: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:32:05.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:05 vm03.local ceph-mon[47106]: pgmap v565: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:32:06.183 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm09.mpsxsc vm09 *:9000,9001 stopped 4s ago 16m - - 2026-03-10T12:32:06.188 INFO:teuthology.orchestra.run.vm03.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T12:32:06.188 INFO:teuthology.orchestra.run.vm03.stderr: Dload Upload Total Spent Left Speed 2026-03-10T12:32:06.189 INFO:teuthology.orchestra.run.vm03.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-03-10T12:32:06.353 INFO:teuthology.orchestra.run.vm03.stdout:anonymousScheduled to start haproxy.rgw.foo.vm09.mpsxsc on host 'vm09' 2026-03-10T12:32:06.541 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for haproxy.rgw.foo.vm09.mpsxsc to start 2026-03-10T12:32:06.703 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:32:06.703 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm03.oeugxe vm03 *:9000,9001 running (11s) 9s ago 16m 3636k - 2.3.17-d1c9119 e85424b0d443 cb975e8f0326 2026-03-10T12:32:06.703 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm09.mpsxsc vm09 *:9000,9001 stopped 
4s ago 16m - - 2026-03-10T12:32:06.900 INFO:teuthology.orchestra.run.vm03.stdout:HEALTH_OK 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: pgmap v566: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='client.? 
192.168.123.103:0/958848088' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:07.103 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:07 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: pgmap v566: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='client.? 
192.168.123.103:0/958848088' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:07.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:07 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T12:32:08.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:08 vm09.local ceph-mon[55914]: from='client.16896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:08.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:08 vm09.local ceph-mon[55914]: from='client.16900 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm09.mpsxsc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:08.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:08 vm09.local ceph-mon[55914]: Schedule start daemon haproxy.rgw.foo.vm09.mpsxsc 2026-03-10T12:32:08.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:08 vm09.local ceph-mon[55914]: from='client.25845 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:08.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:08 vm09.local ceph-mon[55914]: from='client.16908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:08.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:08 vm03.local ceph-mon[47106]: from='client.16896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:08.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:08 vm03.local ceph-mon[47106]: from='client.16900 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm09.mpsxsc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:08.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:08 vm03.local ceph-mon[47106]: Schedule start daemon haproxy.rgw.foo.vm09.mpsxsc 2026-03-10T12:32:08.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:08 vm03.local ceph-mon[47106]: from='client.25845 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:08.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:08 vm03.local ceph-mon[47106]: from='client.16908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:09 vm09.local ceph-mon[55914]: pgmap v567: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:32:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:09.391 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:32:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:32:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:09.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:09 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:32:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:09 vm03.local ceph-mon[47106]: pgmap v567: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:32:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T12:32:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T12:32:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' 2026-03-10T12:32:09.410 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:09 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T12:32:10.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:10 vm09.local ceph-mon[55914]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:32:10.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:10 vm03.local ceph-mon[47106]: from='mgr.14217 192.168.123.103:0/2353944301' entity='mgr.vm03.oxmxtj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T12:32:11.391 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:11 vm09.local ceph-mon[55914]: pgmap v568: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:32:11.409 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:11 vm03.local ceph-mon[47106]: pgmap v568: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:32:12.087 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm09.mpsxsc vm09 *:9000,9001 running (5s) 3s ago 16m 3615k - 2.3.17-d1c9119 e85424b0d443 58ae5fb3589b 2026-03-10T12:32:12.094 INFO:teuthology.orchestra.run.vm03.stderr: % Total % 
Received % Xferd Average Speed Time Time Time Current 2026-03-10T12:32:12.096 INFO:teuthology.orchestra.run.vm03.stderr: Dload Upload Total Spent Left Speed 2026-03-10T12:32:12.101 INFO:teuthology.orchestra.run.vm03.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 26714 0 --:--:-- --:--:-- --:--:-- 26714 2026-03-10T12:32:12.172 INFO:teuthology.orchestra.run.vm03.stdout:anonymous 2026-03-10T12:32:12.172 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T12:32:12.175 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-10T12:32:12.175 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'stat -c '"'"'%u %g'"'"' /var/log/ceph | grep '"'"'167 167'"'"'' 2026-03-10T12:32:12.366 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:32:12.449 INFO:teuthology.orchestra.run.vm03.stdout:167 167 2026-03-10T12:32:12.496 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch status' 2026-03-10T12:32:12.676 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:32:13.057 INFO:teuthology.orchestra.run.vm03.stdout:Backend: cephadm 2026-03-10T12:32:13.057 INFO:teuthology.orchestra.run.vm03.stdout:Available: Yes 2026-03-10T12:32:13.057 INFO:teuthology.orchestra.run.vm03.stdout:Paused: No 2026-03-10T12:32:13.537 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch ps' 2026-03-10T12:32:13.724 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:32:13.821 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:13 vm03.local ceph-mon[47106]: pgmap v569: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:32:13.821 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:13 vm03.local ceph-mon[47106]: from='client.16916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:13.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:13 vm09.local ceph-mon[55914]: pgmap v569: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:32:13.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:13 vm09.local ceph-mon[55914]: from='client.16916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:alertmanager.vm03 vm03 *:9093,9094 running (17m) 16s ago 17m 24.8M - 0.25.0 c8568f914cd2 55c3a7e1ea2e 2026-03-10T12:32:14.061 
INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter.vm03 vm03 *:9926 running (17m) 16s ago 17m 10.4M - 19.2.3-678-ge911bdeb 654f31e6858e 35efa696e1b7 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter.vm09 vm09 *:9926 running (17m) 5s ago 17m 6559k - 19.2.3-678-ge911bdeb 654f31e6858e 996319fa5e1a 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:crash.vm03 vm03 running (17m) 16s ago 17m 7612k - 19.2.3-678-ge911bdeb 654f31e6858e 8ec21e01bd5e 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:crash.vm09 vm09 running (17m) 5s ago 17m 7612k - 19.2.3-678-ge911bdeb 654f31e6858e 1c1b7bfaa8f6 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:grafana.vm03 vm03 *:3000 running (17m) 16s ago 17m 77.2M - 10.4.0 c8b91775d855 2640e77d8ce9 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm03.oeugxe vm03 *:9000,9001 running (18s) 16s ago 16m 3636k - 2.3.17-d1c9119 e85424b0d443 cb975e8f0326 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.rgw.foo.vm09.mpsxsc vm09 *:9000,9001 running (7s) 5s ago 16m 3615k - 2.3.17-d1c9119 e85424b0d443 58ae5fb3589b 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:keepalived.rgw.foo.vm03.wvnyuc vm03 running (16m) 16s ago 16m 2373k - 2.2.4 4a3a1ff181d9 b784a9f42456 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:keepalived.rgw.foo.vm09.ecnqjx vm09 running (16m) 5s ago 16m 2377k - 2.2.4 4a3a1ff181d9 8452c268c2cb 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:mgr.vm03.oxmxtj vm03 *:9283,8765,8443 running (18m) 16s ago 18m 574M - 19.2.3-678-ge911bdeb 654f31e6858e 0fc4978a7cf9 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:mgr.vm09.pftowo vm09 *:8443,9283,8765 running (17m) 5s ago 17m 495M - 19.2.3-678-ge911bdeb 654f31e6858e 2008d1c4f3e8 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:mon.vm03 vm03 running (18m) 16s ago 18m 61.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e e2f02b4b2305 2026-03-10T12:32:14.061 INFO:teuthology.orchestra.run.vm03.stdout:mon.vm09 vm09 running (17m) 5s ago 17m 44.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 71f1d5b5c98c 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter.vm03 vm03 *:9100 running (17m) 16s ago 17m 9969k - 1.7.0 72c9c2088986 60adf9e9ea4d 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter.vm09 vm09 *:9100 running (17m) 5s ago 17m 9977k - 1.7.0 72c9c2088986 5dcad22f7bd1 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:osd.0 vm09 running (16m) 5s ago 16m 55.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e9c4c6f3e676 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:osd.1 vm03 running (16m) 16s ago 16m 79.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 11ac912e0f2c 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:osd.2 vm09 running (16m) 5s ago 16m 55.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e fb910104e413 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:osd.3 vm03 running (16m) 16s ago 16m 59.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 36f5c304c0ef 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:osd.4 vm09 running (16m) 5s ago 16m 77.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d9e3c8311bd5 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:osd.5 vm03 running (16m) 16s ago 16m 76.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e c9971542bd3a 2026-03-10T12:32:14.062 
INFO:teuthology.orchestra.run.vm03.stdout:osd.6 vm09 running (16m) 5s ago 16m 55.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d6eae2d41cbd 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:osd.7 vm03 running (16m) 16s ago 16m 53.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 113dcc0c2e61 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:prometheus.vm03 vm03 *:9095 running (16m) 16s ago 17m 59.3M - 2.51.0 1d3b7f56885b aff2d1f59a56 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.pqsxbr vm03 *:8000 running (11m) 16s ago 16m 135M - 19.2.3-678-ge911bdeb 654f31e6858e 51e5575cc716 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm03.yhnrdc vm03 *:8001 running (5m) 16s ago 16m 124M - 19.2.3-678-ge911bdeb 654f31e6858e c04249fd4424 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.jddmdl vm09 *:8001 running (50s) 5s ago 16m 98.4M - 19.2.3-678-ge911bdeb 654f31e6858e 46cdccfbbfd3 2026-03-10T12:32:14.062 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo.vm09.wcqnzb vm09 *:8000 running (33s) 5s ago 16m 99.7M - 19.2.3-678-ge911bdeb 654f31e6858e 3bfce1c83f3d 2026-03-10T12:32:14.146 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch ls' 2026-03-10T12:32:14.340 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:alertmanager ?:9093,9094 1/1 17s ago 18m count:1 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter ?:9926 2/2 17s ago 18m * 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:crash 2/2 17s ago 18m * 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:grafana ?:3000 1/1 17s ago 18m count:1 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:ingress.rgw.foo 12.12.1.103:9000,9001 4/4 17s ago 16m count:2 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:mgr 2/2 17s ago 18m count:2 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:mon 2/2 17s ago 17m vm03:192.168.123.103=vm03;vm09:192.168.123.109=vm09;count:2 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter ?:9100 2/2 17s ago 18m * 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:osd.all-available-devices 8 17s ago 17m * 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:prometheus ?:9095 1/1 17s ago 18m count:1 2026-03-10T12:32:14.589 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foo ?:8000 4/4 17s ago 16m count:4;* 2026-03-10T12:32:14.624 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:14 vm03.local ceph-mon[47106]: from='client.16920 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:14.624 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:14 vm03.local ceph-mon[47106]: pgmap v570: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:32:14.624 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:14 vm03.local ceph-mon[47106]: from='client.16924 -' entity='client.admin' cmd=[{"prefix": "orch 
ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:14.667 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch host ls' 2026-03-10T12:32:14.843 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:32:14.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:14 vm09.local ceph-mon[55914]: from='client.16920 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:14.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:14 vm09.local ceph-mon[55914]: pgmap v570: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail 2026-03-10T12:32:14.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:14 vm09.local ceph-mon[55914]: from='client.16924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:15.092 INFO:teuthology.orchestra.run.vm03.stdout:HOST ADDR LABELS STATUS 2026-03-10T12:32:15.092 INFO:teuthology.orchestra.run.vm03.stdout:vm03 192.168.123.103 2026-03-10T12:32:15.092 INFO:teuthology.orchestra.run.vm03.stdout:vm09 192.168.123.109 2026-03-10T12:32:15.092 INFO:teuthology.orchestra.run.vm03.stdout:2 hosts in cluster 2026-03-10T12:32:15.140 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch device ls' 2026-03-10T12:32:15.331 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 16m ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdb hdd DWNBRSTVMM03001 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdc hdd DWNBRSTVMM03002 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdd hdd DWNBRSTVMM03003 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vde hdd DWNBRSTVMM03004 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 16m ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vdb hdd DWNBRSTVMM09001 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vdc hdd DWNBRSTVMM09002 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM 
detected 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vdd hdd DWNBRSTVMM09003 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:32:15.576 INFO:teuthology.orchestra.run.vm03.stdout:vm09 /dev/vde hdd DWNBRSTVMM09004 20.0G No 16m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T12:32:15.629 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- bash -c 'ceph orch ls | grep '"'"'^osd.all-available-devices '"'"'' 2026-03-10T12:32:15.653 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:15 vm03.local ceph-mon[47106]: from='client.16928 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:15.653 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:15 vm03.local ceph-mon[47106]: from='client.16932 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:15.807 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:32:15.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:15 vm09.local ceph-mon[55914]: from='client.16928 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:15.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:15 vm09.local ceph-mon[55914]: from='client.16932 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:16.075 INFO:teuthology.orchestra.run.vm03.stdout:osd.all-available-devices 8 19s ago 17m * 2026-03-10T12:32:16.136 DEBUG:teuthology.run_tasks:Unwinding manager vip 2026-03-10T12:32:16.139 INFO:tasks.vip:Removing 12.12.0.103 (and any VIPs) on vm03.local iface eth0... 2026-03-10T12:32:16.139 DEBUG:teuthology.orchestra.run.vm03:> sudo ip addr del 12.12.0.103/22 dev eth0 2026-03-10T12:32:16.202 DEBUG:teuthology.orchestra.run.vm03:> sudo ip addr del 12.12.1.103/22 dev eth0 2026-03-10T12:32:16.272 INFO:tasks.vip:Removing 12.12.0.109 (and any VIPs) on vm09.local iface eth0... 2026-03-10T12:32:16.272 DEBUG:teuthology.orchestra.run.vm09:> sudo ip addr del 12.12.0.109/22 dev eth0 2026-03-10T12:32:16.301 DEBUG:teuthology.orchestra.run.vm09:> sudo ip addr del 12.12.1.103/22 dev eth0 2026-03-10T12:32:16.366 INFO:teuthology.orchestra.run.vm09.stderr:Error: ipv4: Address not found. 
2026-03-10T12:32:16.368 DEBUG:teuthology.orchestra.run:got remote process result: 2 2026-03-10T12:32:16.368 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-03-10T12:32:16.370 INFO:tasks.cephadm:Teardown begin 2026-03-10T12:32:16.370 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T12:32:16.395 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T12:32:16.434 INFO:tasks.cephadm:Disabling cephadm mgr module 2026-03-10T12:32:16.434 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 -- ceph mgr module disable cephadm 2026-03-10T12:32:16.461 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:16 vm03.local ceph-mon[47106]: pgmap v571: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:32:16.461 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:16 vm03.local ceph-mon[47106]: from='client.16936 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:16.461 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:16 vm03.local ceph-mon[47106]: from='client.16940 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:16.629 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/mon.vm03/config 2026-03-10T12:32:16.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:16 vm09.local ceph-mon[55914]: pgmap v571: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T12:32:16.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:16 vm09.local ceph-mon[55914]: from='client.16936 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:16.641 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:16 vm09.local ceph-mon[55914]: from='client.16940 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T12:32:16.648 INFO:teuthology.orchestra.run.vm03.stderr:Error: statfs /etc/ceph/ceph.client.admin.keyring: no such file or directory 2026-03-10T12:32:16.670 DEBUG:teuthology.orchestra.run:got remote process result: 125 2026-03-10T12:32:16.670 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-10T12:32:16.670 DEBUG:teuthology.orchestra.run.vm03:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-10T12:32:16.727 DEBUG:teuthology.orchestra.run.vm09:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-10T12:32:16.744 INFO:tasks.cephadm:Stopping all daemons... 2026-03-10T12:32:16.744 INFO:tasks.cephadm.mon.vm03:Stopping mon.vm03... 2026-03-10T12:32:16.744 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm03 2026-03-10T12:32:17.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:16 vm03.local systemd[1]: Stopping Ceph mon.vm03 for 7444ff0e-1c7a-11f1-9305-473e10361f26... 
2026-03-10T12:32:17.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:16 vm03.local ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm03[47102]: 2026-03-10T12:32:16.955+0000 7fb038af3640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm03 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T12:32:17.160 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 10 12:32:16 vm03.local ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm03[47102]: 2026-03-10T12:32:16.955+0000 7fb038af3640 -1 mon.vm03@0(leader) e2 *** Got Signal Terminated *** 2026-03-10T12:32:17.323 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm03.service' 2026-03-10T12:32:17.360 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T12:32:17.360 INFO:tasks.cephadm.mon.vm03:Stopped mon.vm03 2026-03-10T12:32:17.360 INFO:tasks.cephadm.mon.vm09:Stopping mon.vm09... 2026-03-10T12:32:17.360 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm09 2026-03-10T12:32:17.636 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:17 vm09.local systemd[1]: Stopping Ceph mon.vm09 for 7444ff0e-1c7a-11f1-9305-473e10361f26... 2026-03-10T12:32:17.636 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:17 vm09.local ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm09[55910]: 2026-03-10T12:32:17.474+0000 7f47c594a640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm09 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T12:32:17.636 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:17 vm09.local ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm09[55910]: 2026-03-10T12:32:17.474+0000 7f47c594a640 -1 mon.vm09@1(peon) e2 *** Got Signal Terminated *** 2026-03-10T12:32:17.636 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:17 vm09.local podman[85322]: 2026-03-10 12:32:17.608092972 +0000 UTC m=+0.152547499 container died 71f1d5b5c98cf8b60610f28f3b4d98e6bc6ba9f8c3dc0019e4f92bd04a305756 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm09, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T12:32:17.636 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:17 vm09.local podman[85322]: 2026-03-10 12:32:17.62759395 +0000 UTC m=+0.172048477 container remove 
71f1d5b5c98cf8b60610f28f3b4d98e6bc6ba9f8c3dc0019e4f92bd04a305756 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm09, CEPH_REF=squid, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True) 2026-03-10T12:32:17.636 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 10 12:32:17 vm09.local bash[85322]: ceph-7444ff0e-1c7a-11f1-9305-473e10361f26-mon-vm09 2026-03-10T12:32:17.702 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-7444ff0e-1c7a-11f1-9305-473e10361f26@mon.vm09.service' 2026-03-10T12:32:17.741 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T12:32:17.741 INFO:tasks.cephadm.mon.vm09:Stopped mon.vm09 2026-03-10T12:32:17.741 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 --force --keep-logs 2026-03-10T12:32:17.875 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:33:07.162 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 --force --keep-logs 2026-03-10T12:33:07.287 INFO:teuthology.orchestra.run.vm09.stdout:Deleting cluster with fsid: 7444ff0e-1c7a-11f1-9305-473e10361f26 2026-03-10T12:33:55.038 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T12:33:55.066 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T12:33:55.097 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-10T12:33:55.097 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025/remote/vm03/crash 2026-03-10T12:33:55.097 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/crash -- . 2026-03-10T12:33:55.133 INFO:teuthology.orchestra.run.vm03.stderr:tar: /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/crash: Cannot open: No such file or directory 2026-03-10T12:33:55.133 INFO:teuthology.orchestra.run.vm03.stderr:tar: Error is not recoverable: exiting now 2026-03-10T12:33:55.134 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025/remote/vm09/crash 2026-03-10T12:33:55.134 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/crash -- . 
2026-03-10T12:33:55.165 INFO:teuthology.orchestra.run.vm09.stderr:tar: /var/lib/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/crash: Cannot open: No such file or directory 2026-03-10T12:33:55.165 INFO:teuthology.orchestra.run.vm09.stderr:tar: Error is not recoverable: exiting now 2026-03-10T12:33:55.167 INFO:tasks.cephadm:Checking cluster log for badness... 2026-03-10T12:33:55.167 DEBUG:teuthology.orchestra.run.vm03:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1 2026-03-10T12:33:55.206 INFO:tasks.cephadm:Compressing logs... 2026-03-10T12:33:55.206 DEBUG:teuthology.orchestra.run.vm03:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-10T12:33:55.248 DEBUG:teuthology.orchestra.run.vm09:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-10T12:33:55.272 INFO:teuthology.orchestra.run.vm03.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-10T12:33:55.273 INFO:teuthology.orchestra.run.vm03.stderr:‘/var/log/rbd-target-api’: No such file or directory 2026-03-10T12:33:55.274 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-10T12:33:55.274 INFO:teuthology.orchestra.run.vm09.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-10T12:33:55.274 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mon.vm03.log 2026-03-10T12:33:55.276 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-volume.log 2026-03-10T12:33:55.276 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.ceph-exporter.vm09.log 2026-03-10T12:33:55.277 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-volume.log: 91.7% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-10T12:33:55.277 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mgr.vm09.pftowo.log 2026-03-10T12:33:55.278 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.log 2026-03-10T12:33:55.278 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.ceph-exporter.vm09.log: 29.8% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.ceph-exporter.vm09.log.gz 2026-03-10T12:33:55.281 INFO:teuthology.orchestra.run.vm09.stderr: 95.8% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-volume.log.gz 2026-03-10T12:33:55.281 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mon.vm09.log 2026-03-10T12:33:55.282 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.audit.log 2026-03-10T12:33:55.284 
INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mon.vm03.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mgr.vm03.oxmxtj.log
2026-03-10T12:33:55.285 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mgr.vm09.pftowo.log: /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mon.vm09.log: 92.8% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mgr.vm09.pftowo.log.gz
2026-03-10T12:33:55.286 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.log
2026-03-10T12:33:55.287 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.log: 90.0% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.log.gz
2026-03-10T12:33:55.288 INFO:teuthology.orchestra.run.vm03.stderr: 91.7% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-10T12:33:55.288 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.audit.log: 91.4% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.audit.log.gz
2026-03-10T12:33:55.289 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.audit.log
2026-03-10T12:33:55.289 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.cephadm.log
2026-03-10T12:33:55.290 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.log: 90.1% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.log.gz
2026-03-10T12:33:55.290 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.0.log
2026-03-10T12:33:55.291 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.cephadm.log: 83.0% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.cephadm.log.gz
2026-03-10T12:33:55.291 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.2.log
2026-03-10T12:33:55.294 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mgr.vm03.oxmxtj.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.cephadm.log
2026-03-10T12:33:55.297 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.0.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.4.log
2026-03-10T12:33:55.298 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.audit.log: 91.2% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.audit.log.gz
2026-03-10T12:33:55.301 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-volume.log
2026-03-10T12:33:55.301 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.cephadm.log: 83.6% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph.cephadm.log.gz
2026-03-10T12:33:55.306 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.ceph-exporter.vm03.log
2026-03-10T12:33:55.307 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.6.log
2026-03-10T12:33:55.316 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.1.log
2026-03-10T12:33:55.317 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.4.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm09.wcqnzb.log
2026-03-10T12:33:55.318 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.ceph-exporter.vm03.log: 95.8% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-volume.log.gz
2026-03-10T12:33:55.318 INFO:teuthology.orchestra.run.vm03.stderr: 94.6% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.ceph-exporter.vm03.log.gz
2026-03-10T12:33:55.324 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.3.log
2026-03-10T12:33:55.325 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.6.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm09.jddmdl.log
2026-03-10T12:33:55.327 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm09.wcqnzb.log: 93.6% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm09.wcqnzb.log.gz
2026-03-10T12:33:55.331 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm09.wcqnzb.log
2026-03-10T12:33:55.334 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.5.log
2026-03-10T12:33:55.338 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm09.jddmdl.log
2026-03-10T12:33:55.341 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.7.log
2026-03-10T12:33:55.341 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm09.wcqnzb.log: /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm09.jddmdl.log: 93.5% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm09.jddmdl.log.gz
2026-03-10T12:33:55.346 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm09.jddmdl.log: 93.2% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm09.wcqnzb.log.gz
2026-03-10T12:33:55.351 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm03.pqsxbr.log
2026-03-10T12:33:55.351 INFO:teuthology.orchestra.run.vm09.stderr: 93.3% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm09.jddmdl.log.gz
2026-03-10T12:33:55.358 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.7.log: gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm03.yhnrdc.log
2026-03-10T12:33:55.367 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm03.pqsxbr.log: 93.3% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm03.pqsxbr.log.gz
2026-03-10T12:33:55.370 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm03.pqsxbr.log
2026-03-10T12:33:55.372 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm03.yhnrdc.log: 93.0% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-client.rgw.foo.vm03.yhnrdc.log.gz
2026-03-10T12:33:55.379 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm03.yhnrdc.log
2026-03-10T12:33:55.386 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm03.pqsxbr.log: 93.2% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm03.pqsxbr.log.gz
2026-03-10T12:33:55.399 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm03.yhnrdc.log: 93.1% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ops-log-ceph-client.rgw.foo.vm03.yhnrdc.log.gz
2026-03-10T12:33:55.551 INFO:teuthology.orchestra.run.vm09.stderr: 92.4% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mon.vm09.log.gz
2026-03-10T12:33:55.729 INFO:teuthology.orchestra.run.vm03.stderr: 89.5% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mgr.vm03.oxmxtj.log.gz
2026-03-10T12:33:56.271 INFO:teuthology.orchestra.run.vm03.stderr: 93.2% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.7.log.gz
2026-03-10T12:33:56.308 INFO:teuthology.orchestra.run.vm03.stderr: 90.6% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-mon.vm03.log.gz
2026-03-10T12:33:56.423 INFO:teuthology.orchestra.run.vm09.stderr: 93.2% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.2.log.gz
2026-03-10T12:33:56.443 INFO:teuthology.orchestra.run.vm09.stderr: 93.3% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.0.log.gz
2026-03-10T12:33:56.566 INFO:teuthology.orchestra.run.vm09.stderr: 93.4% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.6.log.gz
2026-03-10T12:33:56.621 INFO:teuthology.orchestra.run.vm09.stderr: 93.6% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.4.log.gz
2026-03-10T12:33:56.623 INFO:teuthology.orchestra.run.vm09.stderr:
2026-03-10T12:33:56.623 INFO:teuthology.orchestra.run.vm09.stderr:real 0m1.360s
2026-03-10T12:33:56.623 INFO:teuthology.orchestra.run.vm09.stderr:user 0m2.522s
2026-03-10T12:33:56.623 INFO:teuthology.orchestra.run.vm09.stderr:sys 0m0.123s
2026-03-10T12:33:56.793 INFO:teuthology.orchestra.run.vm03.stderr: 93.4% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.5.log.gz
2026-03-10T12:33:56.812 INFO:teuthology.orchestra.run.vm03.stderr: 93.6% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.3.log.gz
2026-03-10T12:33:56.899 INFO:teuthology.orchestra.run.vm03.stderr: 93.6% -- replaced with /var/log/ceph/7444ff0e-1c7a-11f1-9305-473e10361f26/ceph-osd.1.log.gz
2026-03-10T12:33:56.902 INFO:teuthology.orchestra.run.vm03.stderr:
2026-03-10T12:33:56.902 INFO:teuthology.orchestra.run.vm03.stderr:real 0m1.640s
2026-03-10T12:33:56.902 INFO:teuthology.orchestra.run.vm03.stderr:user 0m2.625s
2026-03-10T12:33:56.902 INFO:teuthology.orchestra.run.vm03.stderr:sys 0m0.155s
2026-03-10T12:33:56.902 INFO:tasks.cephadm:Archiving logs...
2026-03-10T12:33:56.902 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025/remote/vm03/log
2026-03-10T12:33:56.902 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-10T12:33:57.131 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025/remote/vm09/log
2026-03-10T12:33:57.131 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-10T12:33:57.301 INFO:tasks.cephadm:Removing cluster...
2026-03-10T12:33:57.301 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 --force
2026-03-10T12:33:57.442 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: 7444ff0e-1c7a-11f1-9305-473e10361f26
2026-03-10T12:33:57.562 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 7444ff0e-1c7a-11f1-9305-473e10361f26 --force
2026-03-10T12:33:57.697 INFO:teuthology.orchestra.run.vm09.stdout:Deleting cluster with fsid: 7444ff0e-1c7a-11f1-9305-473e10361f26
2026-03-10T12:33:57.811 INFO:tasks.cephadm:Removing cephadm ...
2026-03-10T12:33:57.811 DEBUG:teuthology.orchestra.run.vm03:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-10T12:33:57.828 DEBUG:teuthology.orchestra.run.vm09:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-10T12:33:57.845 INFO:tasks.cephadm:Teardown complete
2026-03-10T12:33:57.845 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-10T12:33:57.848 INFO:teuthology.task.clock:Checking final clock skew...
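The teardown above is driven by the harness, but the same two steps can be reproduced by hand if a run leaves a cluster behind. The sketch below is a minimal manual equivalent, assuming SSH access as the ubuntu test user and that cephadm is still staged at /home/ubuntu/cephtest/cephadm as in this run; the host and destination directory are placeholders.

    # Pull the compressed daemon logs off a node the same way the harness does
    # (a tar stream over the connection), then remove the test cluster by fsid.
    host=vm03.local                               # placeholder node
    fsid=7444ff0e-1c7a-11f1-9305-473e10361f26     # fsid from this run
    dest=./remote/vm03/log                        # placeholder destination

    mkdir -p "$dest"
    ssh "ubuntu@$host" sudo tar c -f - -C /var/log/ceph -- . | tar xf - -C "$dest"
    ssh "ubuntu@$host" sudo /home/ubuntu/cephtest/cephadm rm-cluster \
        --fsid "$fsid" --force
    ssh "ubuntu@$host" rm -rf /home/ubuntu/cephtest/cephadm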
2026-03-10T12:33:57.848 DEBUG:teuthology.orchestra.run.vm03:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T12:33:57.870 DEBUG:teuthology.orchestra.run.vm09:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T12:33:57.885 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found
2026-03-10T12:33:57.903 INFO:teuthology.orchestra.run.vm09.stderr:bash: line 1: ntpq: command not found
2026-03-10T12:33:58.152 INFO:teuthology.orchestra.run.vm09.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T12:33:58.152 INFO:teuthology.orchestra.run.vm09.stdout:===============================================================================
2026-03-10T12:33:58.152 INFO:teuthology.orchestra.run.vm09.stdout:^+ 172-104-154-182.ip.linod> 2 6 377 37 +2105us[+2105us] +/- 30ms
2026-03-10T12:33:58.152 INFO:teuthology.orchestra.run.vm09.stdout:^* 141.144.230.32 2 7 377 37 -1554us[-1763us] +/- 14ms
2026-03-10T12:33:58.152 INFO:teuthology.orchestra.run.vm09.stdout:^+ dominus.von-oppen.com 2 7 377 39 -558us[ -768us] +/- 31ms
2026-03-10T12:33:58.152 INFO:teuthology.orchestra.run.vm09.stdout:^+ 139-162-187-236.ip.linod> 2 6 377 36 +1711us[+1711us] +/- 33ms
2026-03-10T12:33:58.153 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T12:33:58.153 INFO:teuthology.orchestra.run.vm03.stdout:===============================================================================
2026-03-10T12:33:58.153 INFO:teuthology.orchestra.run.vm03.stdout:^+ 172-104-154-182.ip.linod> 2 6 377 39 +1871us[+1871us] +/- 30ms
2026-03-10T12:33:58.153 INFO:teuthology.orchestra.run.vm03.stdout:^* 141.144.230.32 2 7 377 104 -1774us[-1841us] +/- 14ms
2026-03-10T12:33:58.153 INFO:teuthology.orchestra.run.vm03.stdout:^+ dominus.von-oppen.com 2 7 377 36 -754us[ -754us] +/- 31ms
2026-03-10T12:33:58.153 INFO:teuthology.orchestra.run.vm03.stdout:^+ 139-162-187-236.ip.linod> 2 6 377 40 +1666us[+1666us] +/- 33ms
2026-03-10T12:33:58.153 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-10T12:33:58.156 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-10T12:33:58.156 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-10T12:33:58.158 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-10T12:33:58.161 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-10T12:33:58.163 INFO:teuthology.task.internal:Duration was 1392.748305 seconds
2026-03-10T12:33:58.163 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-10T12:33:58.166 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-10T12:33:58.166 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T12:33:58.196 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T12:33:58.240 INFO:teuthology.orchestra.run.vm09.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T12:33:58.241 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T12:33:58.688 INFO:teuthology.task.internal.syslog:Checking logs for errors...
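The clock check near the top of this block tries ntpq first and falls back to chronyc, which is why the "command not found" lines are harmless on these CentOS 9 hosts. A quick manual spot-check of the same thing, with an optional offset summary added; the grep on chronyc tracking output is an illustration, not something the harness runs.

    # ntpq is absent on chrony-only hosts, so the fallback does the real work.
    PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
    # Optional: summarize the current offset instead of eyeballing the sources table.
    chronyc tracking | grep -E 'Last offset|RMS offset'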
2026-03-10T12:33:58.688 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm03.local
2026-03-10T12:33:58.689 DEBUG:teuthology.orchestra.run.vm03:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T12:33:58.712 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm09.local
2026-03-10T12:33:58.713 DEBUG:teuthology.orchestra.run.vm09:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T12:33:58.741 INFO:teuthology.task.internal.syslog:Gathering journactl...
2026-03-10T12:33:58.741 DEBUG:teuthology.orchestra.run.vm03:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T12:33:58.755 DEBUG:teuthology.orchestra.run.vm09:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T12:33:59.217 INFO:teuthology.task.internal.syslog:Compressing syslogs...
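The long pipelines above implement a simple allow/deny check: flag any kernel-log line containing BUG, INFO, or DEADLOCK, strip known-benign patterns, and keep only the first survivor, so an empty result means the check passes. A cut-down version of the same idea can be re-run against the archived copy of this job's kern.log; the exclude list here is deliberately abbreviated and the exact location under the archive tree is assumed from the transfer messages later in the log.

    # Abbreviated re-run of the kernel-log check against the archived, gzipped copy.
    archive=/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025
    zcat "$archive/remote/vm03/syslog/kern.log.gz" \
      | grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' \
      | grep -v CRON \
      | grep -v 'lockdep is turned off' \
      | head -n 1
    # No output means nothing suspicious survived the filters.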
2026-03-10T12:33:59.218 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T12:33:59.220 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T12:33:59.244 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T12:33:59.244 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T12:33:59.244 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T12:33:59.244 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T12:33:59.245 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T12:33:59.246 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T12:33:59.247 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T12:33:59.247 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- 0.0% /home/ubuntu/cephtest/archive/syslog/journalctl.log -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T12:33:59.247 INFO:teuthology.orchestra.run.vm03.stderr:
2026-03-10T12:33:59.247 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T12:33:59.378 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.6% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T12:33:59.382 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.2% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T12:33:59.384 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-10T12:33:59.386 INFO:teuthology.task.internal:Restoring /etc/sudoers...
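Because everything under the syslog directory is gzipped in place above, later inspection does not require unpacking: zgrep and zcat read the .gz files directly. The path below is this job's archive location as reported in the transfer messages; the search string and the exact layout under remote/ are illustrative assumptions.

    # Search the archived, compressed journals without decompressing them first.
    archive=/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025
    zgrep -i 'haproxy' "$archive"/remote/vm*/syslog/journalctl.log.gz | head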
2026-03-10T12:33:59.387 DEBUG:teuthology.orchestra.run.vm03:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T12:33:59.448 DEBUG:teuthology.orchestra.run.vm09:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T12:33:59.472 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-10T12:33:59.475 DEBUG:teuthology.orchestra.run.vm03:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T12:33:59.490 DEBUG:teuthology.orchestra.run.vm09:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T12:33:59.513 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = core
2026-03-10T12:33:59.536 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = core
2026-03-10T12:33:59.551 DEBUG:teuthology.orchestra.run.vm03:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T12:33:59.587 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T12:33:59.588 DEBUG:teuthology.orchestra.run.vm09:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T12:33:59.606 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T12:33:59.606 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-10T12:33:59.609 INFO:teuthology.task.internal:Transferring archived files...
2026-03-10T12:33:59.609 DEBUG:teuthology.misc:Transferring archived files from vm03:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025/remote/vm03
2026-03-10T12:33:59.609 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T12:33:59.658 DEBUG:teuthology.misc:Transferring archived files from vm09:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025/remote/vm09
2026-03-10T12:33:59.658 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T12:33:59.687 INFO:teuthology.task.internal:Removing archive directory...
2026-03-10T12:33:59.687 DEBUG:teuthology.orchestra.run.vm03:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T12:33:59.699 DEBUG:teuthology.orchestra.run.vm09:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T12:33:59.743 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-10T12:33:59.745 INFO:teuthology.task.internal:Not uploading archives.
2026-03-10T12:33:59.745 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-10T12:33:59.748 INFO:teuthology.task.internal:Tidying up after the test...
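The coredump unwind above restores kernel.core_pattern to its default, discards cores produced by systemd-sysusers, and removes the directory only if it is empty; the "got remote process result: 1" lines are the expected outcome of `test -e` when no cores were kept. A sketch of the round-trip the manager performs follows, with an illustrative capture pattern; the exact pattern teuthology installs at setup time is not shown in this excerpt.

    # Point core dumps at a per-job directory for the duration of a test...
    sudo mkdir -p /home/ubuntu/cephtest/archive/coredump
    sudo sysctl -w kernel.core_pattern='/home/ubuntu/cephtest/archive/coredump/%t.%p.core'
    # ...and restore the default afterwards, keeping the directory only if non-empty.
    sudo sysctl -w kernel.core_pattern=core
    rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump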
2026-03-10T12:33:59.748 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T12:33:59.755 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T12:33:59.771 INFO:teuthology.orchestra.run.vm03.stdout: 8532147 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 12:33 /home/ubuntu/cephtest
2026-03-10T12:33:59.799 INFO:teuthology.orchestra.run.vm09.stdout: 8532140 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 12:33 /home/ubuntu/cephtest
2026-03-10T12:33:59.800 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-10T12:33:59.806 INFO:teuthology.run:Summary data:
description: orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 1-start 2-services/rgw-ingress 3-final}
duration: 1392.7483053207397
owner: kyr
success: true
2026-03-10T12:33:59.806 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T12:33:59.843 INFO:teuthology.run:pass
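The run ends with the summary pushed to the local report service and the job marked as passed. The same verdict is normally also written to a summary.yaml file in the job's archive directory, so it can be checked without the report service; the file name below follows the standard teuthology archive layout and is assumed rather than shown in this excerpt.

    # Confirm the verdict from the archive host after the run.
    grep -E '^(success|duration|owner):' \
        /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1025/summary.yaml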