2026-03-09T17:47:05.358 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-09T17:47:05.362 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T17:47:05.387 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/592
branch: squid
description: orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '592'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-09_11:23:05-orch-squid-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
    - scontext=system_u:system_r:getty_t:s0
  workunit:
    branch: tt-squid
    sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 3443
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
targets:
  vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJ7V2WQ54Ao4WtvFa/08aZRpmYZXpD+KH3rviawPB7QzAe5gKqVHxwR9Fhdqzsq2qZYfssYHJhJpHsAwy/BZqWA=
  vm09.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCVQVA4ZVFnxsM5BXjgHq0kOh5QSnSwIu89/EEvMuE0E/JfwbMIY3S7xS2/YOvF03QkLEhdN60Occ5N2gBbuazU=
tasks:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install nvmetcli nvme-cli -y
- nvme_loop: null
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip: null
- cephadm.shell:
    host.a:
    - ceph orch device ls --refresh
- vip.exec:
    all-hosts:
    - systemctl stop nfs-server
- cephadm.shell:
    host.a:
    - ceph fs volume create foofs
- cephadm.apply:
    specs:
    - placement:
        count: 2
      service_id: foo
      service_type: nfs
      spec:
        port: 12049
    - service_id: nfs.foo
      service_type: ingress
      spec:
        backend_service: nfs.foo
        frontend_port: 2049
        monitor_port: 9002
        virtual_ip: '{{VIP0}}/{{VIPPREFIXLEN}}'
- cephadm.wait_for_service:
    service: nfs.foo
- cephadm.wait_for_service:
    service: ingress.nfs.foo
- cephadm.shell:
    host.a:
    - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
- vip.exec:
    host.a:
    - mkdir /mnt/foo
    - sleep 5
    - mount -t nfs {{VIP0}}:/fake /mnt/foo
    - echo test > /mnt/foo/testfile
    - sync
- cephadm.shell:
    host.a:
    - "echo \"Check with each haproxy down in turn...\"\nfor haproxy in `ceph orch\ \ ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do\n ceph orch daemon stop\ \ $haproxy\n while ! ceph orch ps | grep $haproxy | grep stopped; do sleep\ \ 1 ; done\n cat /mnt/foo/testfile\n echo $haproxy > /mnt/foo/testfile\n \ \ sync\n ceph orch daemon start $haproxy\n while ! ceph orch ps | grep $haproxy\ \ | grep running; do sleep 1 ; done\ndone\n"
    volumes:
    - /mnt/foo:/mnt/foo
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-09_11:23:05
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-09T17:47:05.387 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa; will attempt to use it
2026-03-09T17:47:05.388 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks
2026-03-09T17:47:05.388 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-09T17:47:05.388 INFO:teuthology.task.internal:Checking packages...
2026-03-09T17:47:05.388 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-09T17:47:05.388 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-09T17:47:05.388 INFO:teuthology.packaging:ref: None
2026-03-09T17:47:05.388 INFO:teuthology.packaging:tag: None
2026-03-09T17:47:05.388 INFO:teuthology.packaging:branch: squid
2026-03-09T17:47:05.388 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T17:47:05.388 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid
2026-03-09T17:47:06.132 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb
2026-03-09T17:47:06.133 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-09T17:47:06.134 INFO:teuthology.task.internal:no buildpackages task found
2026-03-09T17:47:06.134 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-09T17:47:06.135 INFO:teuthology.task.internal:Saving configuration
2026-03-09T17:47:06.139 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-09T17:47:06.140 INFO:teuthology.task.internal.check_lock:Checking locks...
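For readability, the YAML-escaped cephadm.shell script in the job configuration above unfolds into the bash below. This is the same haproxy failover loop the task later runs inside the cephadm shell (with /mnt/foo bind-mounted via the volumes entry), shown here only in unescaped form; it is not an additional step.

echo "Check with each haproxy down in turn..."
for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do
  ceph orch daemon stop $haproxy
  while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
  cat /mnt/foo/testfile
  echo $haproxy > /mnt/foo/testfile
  sync
  ceph orch daemon start $haproxy
  while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
done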
2026-03-09T17:47:06.147 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm06.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/592', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 17:45:33.641869', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:06', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJ7V2WQ54Ao4WtvFa/08aZRpmYZXpD+KH3rviawPB7QzAe5gKqVHxwR9Fhdqzsq2qZYfssYHJhJpHsAwy/BZqWA='} 2026-03-09T17:47:06.151 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm09.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/592', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 17:45:33.642309', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:09', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCVQVA4ZVFnxsM5BXjgHq0kOh5QSnSwIu89/EEvMuE0E/JfwbMIY3S7xS2/YOvF03QkLEhdN60Occ5N2gBbuazU='} 2026-03-09T17:47:06.151 INFO:teuthology.run_tasks:Running task internal.add_remotes... 2026-03-09T17:47:06.152 INFO:teuthology.task.internal:roles: ubuntu@vm06.local - ['host.a', 'client.0'] 2026-03-09T17:47:06.152 INFO:teuthology.task.internal:roles: ubuntu@vm09.local - ['host.b', 'client.1'] 2026-03-09T17:47:06.152 INFO:teuthology.run_tasks:Running task console_log... 2026-03-09T17:47:06.159 DEBUG:teuthology.task.console_log:vm06 does not support IPMI; excluding 2026-03-09T17:47:06.165 DEBUG:teuthology.task.console_log:vm09 does not support IPMI; excluding 2026-03-09T17:47:06.165 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fc883172170>, signals=[15]) 2026-03-09T17:47:06.165 INFO:teuthology.run_tasks:Running task internal.connect... 2026-03-09T17:47:06.166 INFO:teuthology.task.internal:Opening connections... 2026-03-09T17:47:06.166 DEBUG:teuthology.task.internal:connecting to ubuntu@vm06.local 2026-03-09T17:47:06.166 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T17:47:06.226 DEBUG:teuthology.task.internal:connecting to ubuntu@vm09.local 2026-03-09T17:47:06.226 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T17:47:06.287 INFO:teuthology.run_tasks:Running task internal.push_inventory... 
2026-03-09T17:47:06.288 DEBUG:teuthology.orchestra.run.vm06:> uname -m 2026-03-09T17:47:06.335 INFO:teuthology.orchestra.run.vm06.stdout:x86_64 2026-03-09T17:47:06.335 DEBUG:teuthology.orchestra.run.vm06:> cat /etc/os-release 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:NAME="CentOS Stream" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:VERSION="9" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:ID="centos" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:ID_LIKE="rhel fedora" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_ID="9" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:PLATFORM_ID="platform:el9" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:ANSI_COLOR="0;31" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:LOGO="fedora-logo-icon" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:HOME_URL="https://centos.org/" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-09T17:47:06.390 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-09T17:47:06.390 INFO:teuthology.lock.ops:Updating vm06.local on lock server 2026-03-09T17:47:06.395 DEBUG:teuthology.orchestra.run.vm09:> uname -m 2026-03-09T17:47:06.409 INFO:teuthology.orchestra.run.vm09.stdout:x86_64 2026-03-09T17:47:06.409 DEBUG:teuthology.orchestra.run.vm09:> cat /etc/os-release 2026-03-09T17:47:06.462 INFO:teuthology.orchestra.run.vm09.stdout:NAME="CentOS Stream" 2026-03-09T17:47:06.462 INFO:teuthology.orchestra.run.vm09.stdout:VERSION="9" 2026-03-09T17:47:06.462 INFO:teuthology.orchestra.run.vm09.stdout:ID="centos" 2026-03-09T17:47:06.462 INFO:teuthology.orchestra.run.vm09.stdout:ID_LIKE="rhel fedora" 2026-03-09T17:47:06.462 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_ID="9" 2026-03-09T17:47:06.462 INFO:teuthology.orchestra.run.vm09.stdout:PLATFORM_ID="platform:el9" 2026-03-09T17:47:06.462 INFO:teuthology.orchestra.run.vm09.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-09T17:47:06.462 INFO:teuthology.orchestra.run.vm09.stdout:ANSI_COLOR="0;31" 2026-03-09T17:47:06.462 INFO:teuthology.orchestra.run.vm09.stdout:LOGO="fedora-logo-icon" 2026-03-09T17:47:06.463 INFO:teuthology.orchestra.run.vm09.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-09T17:47:06.463 INFO:teuthology.orchestra.run.vm09.stdout:HOME_URL="https://centos.org/" 2026-03-09T17:47:06.463 INFO:teuthology.orchestra.run.vm09.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-09T17:47:06.463 INFO:teuthology.orchestra.run.vm09.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-09T17:47:06.463 INFO:teuthology.orchestra.run.vm09.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-09T17:47:06.463 INFO:teuthology.lock.ops:Updating vm09.local on lock server 2026-03-09T17:47:06.467 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles... 2026-03-09T17:47:06.469 INFO:teuthology.run_tasks:Running task internal.check_conflict... 2026-03-09T17:47:06.470 INFO:teuthology.task.internal:Checking for old test directory... 
2026-03-09T17:47:06.470 DEBUG:teuthology.orchestra.run.vm06:> test '!' -e /home/ubuntu/cephtest 2026-03-09T17:47:06.471 DEBUG:teuthology.orchestra.run.vm09:> test '!' -e /home/ubuntu/cephtest 2026-03-09T17:47:06.516 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 2026-03-09T17:47:06.517 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 2026-03-09T17:47:06.517 DEBUG:teuthology.orchestra.run.vm06:> test -z $(ls -A /var/lib/ceph) 2026-03-09T17:47:06.525 DEBUG:teuthology.orchestra.run.vm09:> test -z $(ls -A /var/lib/ceph) 2026-03-09T17:47:06.537 INFO:teuthology.orchestra.run.vm06.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-09T17:47:06.570 INFO:teuthology.orchestra.run.vm09.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-09T17:47:06.571 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-03-09T17:47:06.579 DEBUG:teuthology.orchestra.run.vm06:> test -e /ceph-qa-ready 2026-03-09T17:47:06.592 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T17:47:06.788 DEBUG:teuthology.orchestra.run.vm09:> test -e /ceph-qa-ready 2026-03-09T17:47:06.802 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T17:47:06.990 INFO:teuthology.run_tasks:Running task internal.base... 2026-03-09T17:47:06.991 INFO:teuthology.task.internal:Creating test directory... 2026-03-09T17:47:06.991 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-09T17:47:06.993 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-09T17:47:07.009 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-03-09T17:47:07.010 INFO:teuthology.run_tasks:Running task internal.archive... 2026-03-09T17:47:07.011 INFO:teuthology.task.internal:Creating archive directory... 2026-03-09T17:47:07.011 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-09T17:47:07.049 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-09T17:47:07.066 INFO:teuthology.run_tasks:Running task internal.coredump... 2026-03-09T17:47:07.067 INFO:teuthology.task.internal:Enabling coredump saving... 
2026-03-09T17:47:07.067 DEBUG:teuthology.orchestra.run.vm06:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-09T17:47:07.117 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T17:47:07.117 DEBUG:teuthology.orchestra.run.vm09:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-09T17:47:07.132 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T17:47:07.133 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-09T17:47:07.160 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-09T17:47:07.183 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-09T17:47:07.192 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-09T17:47:07.197 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-09T17:47:07.206 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-09T17:47:07.207 INFO:teuthology.run_tasks:Running task internal.sudo... 2026-03-09T17:47:07.208 INFO:teuthology.task.internal:Configuring sudo... 2026-03-09T17:47:07.208 DEBUG:teuthology.orchestra.run.vm06:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-09T17:47:07.235 DEBUG:teuthology.orchestra.run.vm09:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-09T17:47:07.270 INFO:teuthology.run_tasks:Running task internal.syslog... 2026-03-09T17:47:07.272 INFO:teuthology.task.internal.syslog:Starting syslog monitoring... 
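A note on the coredump setup above: in kernel.core_pattern, %t expands to the dump time in seconds since the epoch and %p to the PID of the crashing process, so any core lands in the per-job archive directory under a unique name. To verify the setting took effect on a node (a generic check, not something the task itself runs):

sysctl kernel.core_pattern              # should print the cephtest archive coredump path
cat /proc/sys/kernel/core_pattern       # same value, read via procfs
# cores are only written if the process's soft core limit allows it, e.g.:
ulimit -c unlimited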
2026-03-09T17:47:07.273 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-09T17:47:07.299 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-09T17:47:07.324 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-09T17:47:07.374 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-09T17:47:07.431 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:47:07.431 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-09T17:47:07.487 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-09T17:47:07.509 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-09T17:47:07.566 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T17:47:07.566 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-09T17:47:07.623 DEBUG:teuthology.orchestra.run.vm06:> sudo service rsyslog restart 2026-03-09T17:47:07.624 DEBUG:teuthology.orchestra.run.vm09:> sudo service rsyslog restart 2026-03-09T17:47:07.649 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-09T17:47:07.689 INFO:teuthology.orchestra.run.vm09.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-09T17:47:08.110 INFO:teuthology.run_tasks:Running task internal.timer... 2026-03-09T17:47:08.111 INFO:teuthology.task.internal:Starting timer... 2026-03-09T17:47:08.111 INFO:teuthology.run_tasks:Running task pcp... 2026-03-09T17:47:08.114 INFO:teuthology.run_tasks:Running task selinux... 2026-03-09T17:47:08.116 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']} 2026-03-09T17:47:08.116 INFO:teuthology.task.selinux:Excluding vm06: VMs are not yet supported 2026-03-09T17:47:08.116 INFO:teuthology.task.selinux:Excluding vm09: VMs are not yet supported 2026-03-09T17:47:08.116 DEBUG:teuthology.task.selinux:Getting current SELinux state 2026-03-09T17:47:08.116 DEBUG:teuthology.task.selinux:Existing SELinux modes: {} 2026-03-09T17:47:08.116 INFO:teuthology.task.selinux:Putting SELinux into permissive mode 2026-03-09T17:47:08.116 INFO:teuthology.run_tasks:Running task ansible.cephlab... 
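The contents written to /etc/rsyslog.d/80-cephtest.conf are piped into dd over stdin, so they never appear in this log. A drop-in consistent with the kern.log/misc.log files created just above might look like the following; the selectors are an assumption, only the two target paths are taken from the logged commands.

sudo tee /etc/rsyslog.d/80-cephtest.conf <<'EOF'
kern.* /home/ubuntu/cephtest/archive/syslog/kern.log
*.*;kern.none /home/ubuntu/cephtest/archive/syslog/misc.log
EOF
sudo systemctl restart rsyslog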
2026-03-09T17:47:08.118 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}} 2026-03-09T17:47:08.118 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git 2026-03-09T17:47:08.120 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin 2026-03-09T17:47:08.755 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main 2026-03-09T17:47:08.760 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}] 2026-03-09T17:47:08.761 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventory9ehl5l6a --limit vm06.local,vm09.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs 2026-03-09T17:49:28.599 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm06.local'), Remote(name='ubuntu@vm09.local')] 2026-03-09T17:49:28.600 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm06.local' 2026-03-09T17:49:28.600 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T17:49:28.664 DEBUG:teuthology.orchestra.run.vm06:> true 2026-03-09T17:49:28.735 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm06.local' 2026-03-09T17:49:28.735 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm09.local' 2026-03-09T17:49:28.736 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T17:49:28.802 DEBUG:teuthology.orchestra.run.vm09:> true 2026-03-09T17:49:28.877 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm09.local' 2026-03-09T17:49:28.877 INFO:teuthology.run_tasks:Running task clock... 2026-03-09T17:49:28.880 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew... 
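The inventory passed to ansible-playbook via -i is a generated temp file whose contents are not logged. For a manual re-run of the same invocation, a minimal INI inventory like the one below would do; the group name testnodes comes from the playbook shown above, the hostnames from the targets section, and the inventory file path is hypothetical.

cat > /tmp/cephlab-inventory.ini <<'EOF'
[testnodes]
vm06.local
vm09.local
EOF
ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' \
  -i /tmp/cephlab-inventory.ini --limit vm06.local,vm09.local \
  /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml \
  --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs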
2026-03-09T17:49:28.880 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-09T17:49:28.880 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T17:49:28.881 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-09T17:49:28.881 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T17:49:28.919 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-09T17:49:28.941 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-09T17:49:28.959 INFO:teuthology.orchestra.run.vm09.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-09T17:49:28.976 INFO:teuthology.orchestra.run.vm06.stderr:sudo: ntpd: command not found 2026-03-09T17:49:28.977 INFO:teuthology.orchestra.run.vm09.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-09T17:49:28.986 INFO:teuthology.orchestra.run.vm06.stdout:506 Cannot talk to daemon 2026-03-09T17:49:29.002 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-09T17:49:29.004 INFO:teuthology.orchestra.run.vm09.stderr:sudo: ntpd: command not found 2026-03-09T17:49:29.018 INFO:teuthology.orchestra.run.vm09.stdout:506 Cannot talk to daemon 2026-03-09T17:49:29.019 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-09T17:49:29.038 INFO:teuthology.orchestra.run.vm09.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-09T17:49:29.055 INFO:teuthology.orchestra.run.vm09.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-09T17:49:29.071 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found 2026-03-09T17:49:29.073 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T17:49:29.073 INFO:teuthology.orchestra.run.vm06.stdout:=============================================================================== 2026-03-09T17:49:29.103 INFO:teuthology.orchestra.run.vm09.stderr:bash: line 1: ntpq: command not found 2026-03-09T17:49:29.105 INFO:teuthology.orchestra.run.vm09.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T17:49:29.105 INFO:teuthology.orchestra.run.vm09.stdout:=============================================================================== 2026-03-09T17:49:29.106 INFO:teuthology.run_tasks:Running task pexec... 2026-03-09T17:49:29.109 INFO:teuthology.task.pexec:Executing custom commands... 
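On these CentOS Stream 9 VMs only chrony is present, so every ntp/ntpd/ntpq branch of the combined command above fails, and chronyc makestep answers "506 Cannot talk to daemon" because chronyd has just been stopped by the first part of the chain; the task still proceeds once chronyd is started again. A chrony-only equivalent of the same step-and-report sequence might look like this (a sketch, not what the task runs):

sudo systemctl restart chronyd.service   # makestep needs a running daemon
sudo chronyc makestep                    # step the clock immediately instead of slewing
chronyc sources                          # report peers, as the original command does at the end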
2026-03-09T17:49:29.109 DEBUG:teuthology.orchestra.run.vm06:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-09T17:49:29.109 DEBUG:teuthology.orchestra.run.vm09:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-09T17:49:29.116 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo dnf remove nvme-cli -y 2026-03-09T17:49:29.116 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo dnf install nvmetcli nvme-cli -y 2026-03-09T17:49:29.116 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm06.local 2026-03-09T17:49:29.116 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-09T17:49:29.117 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y 2026-03-09T17:49:29.148 DEBUG:teuthology.task.pexec:ubuntu@vm09.local< sudo dnf remove nvme-cli -y 2026-03-09T17:49:29.148 DEBUG:teuthology.task.pexec:ubuntu@vm09.local< sudo dnf install nvmetcli nvme-cli -y 2026-03-09T17:49:29.148 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm09.local 2026-03-09T17:49:29.148 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-09T17:49:29.148 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y 2026-03-09T17:49:29.346 INFO:teuthology.orchestra.run.vm06.stdout:No match for argument: nvme-cli 2026-03-09T17:49:29.347 INFO:teuthology.orchestra.run.vm06.stderr:No packages marked for removal. 2026-03-09T17:49:29.350 INFO:teuthology.orchestra.run.vm06.stdout:Dependencies resolved. 2026-03-09T17:49:29.350 INFO:teuthology.orchestra.run.vm06.stdout:Nothing to do. 2026-03-09T17:49:29.353 INFO:teuthology.orchestra.run.vm06.stdout:Complete! 2026-03-09T17:49:29.382 INFO:teuthology.orchestra.run.vm09.stdout:No match for argument: nvme-cli 2026-03-09T17:49:29.382 INFO:teuthology.orchestra.run.vm09.stderr:No packages marked for removal. 2026-03-09T17:49:29.385 INFO:teuthology.orchestra.run.vm09.stdout:Dependencies resolved. 2026-03-09T17:49:29.386 INFO:teuthology.orchestra.run.vm09.stdout:Nothing to do. 2026-03-09T17:49:29.386 INFO:teuthology.orchestra.run.vm09.stdout:Complete! 2026-03-09T17:49:29.851 INFO:teuthology.orchestra.run.vm06.stdout:Last metadata expiration check: 0:01:23 ago on Mon 09 Mar 2026 05:48:06 PM UTC. 2026-03-09T17:49:29.869 INFO:teuthology.orchestra.run.vm09.stdout:Last metadata expiration check: 0:01:11 ago on Mon 09 Mar 2026 05:48:18 PM UTC. 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout:Dependencies resolved. 
2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================ 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout: Package Architecture Version Repository Size 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================ 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout:Installing: 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout:Installing dependencies: 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout:Transaction Summary 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================ 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout:Install 6 Packages 2026-03-09T17:49:29.970 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:49:29.971 INFO:teuthology.orchestra.run.vm09.stdout:Total download size: 2.3 M 2026-03-09T17:49:29.971 INFO:teuthology.orchestra.run.vm09.stdout:Installed size: 11 M 2026-03-09T17:49:29.971 INFO:teuthology.orchestra.run.vm09.stdout:Downloading Packages: 2026-03-09T17:49:29.983 INFO:teuthology.orchestra.run.vm06.stdout:Dependencies resolved. 
2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================ 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout: Package Architecture Version Repository Size 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================ 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout:Installing: 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout:Installing dependencies: 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout:Transaction Summary 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================ 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout:Install 6 Packages 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout:Total download size: 2.3 M 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout:Installed size: 11 M 2026-03-09T17:49:29.984 INFO:teuthology.orchestra.run.vm06.stdout:Downloading Packages: 2026-03-09T17:49:30.548 INFO:teuthology.orchestra.run.vm09.stdout:(1/6): python3-configshell-1.1.30-1.el9.noarch. 598 kB/s | 72 kB 00:00 2026-03-09T17:49:30.548 INFO:teuthology.orchestra.run.vm09.stdout:(2/6): nvmetcli-0.8-3.el9.noarch.rpm 362 kB/s | 44 kB 00:00 2026-03-09T17:49:30.557 INFO:teuthology.orchestra.run.vm06.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 408 kB/s | 44 kB 00:00 2026-03-09T17:49:30.591 INFO:teuthology.orchestra.run.vm06.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 
509 kB/s | 72 kB 00:00 2026-03-09T17:49:30.609 INFO:teuthology.orchestra.run.vm09.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 1.4 MB/s | 84 kB 00:00 2026-03-09T17:49:30.610 INFO:teuthology.orchestra.run.vm09.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.4 MB/s | 150 kB 00:00 2026-03-09T17:49:30.647 INFO:teuthology.orchestra.run.vm06.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 943 kB/s | 84 kB 00:00 2026-03-09T17:49:30.662 INFO:teuthology.orchestra.run.vm06.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.1 MB/s | 150 kB 00:00 2026-03-09T17:49:30.673 INFO:teuthology.orchestra.run.vm09.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 4.7 MB/s | 1.2 MB 00:00 2026-03-09T17:49:30.682 INFO:teuthology.orchestra.run.vm06.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 5.0 MB/s | 1.2 MB 00:00 2026-03-09T17:49:30.765 INFO:teuthology.orchestra.run.vm06.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 7.0 MB/s | 837 kB 00:00 2026-03-09T17:49:30.767 INFO:teuthology.orchestra.run.vm06.stdout:-------------------------------------------------------------------------------- 2026-03-09T17:49:30.776 INFO:teuthology.orchestra.run.vm06.stdout:Total 3.0 MB/s | 2.3 MB 00:00 2026-03-09T17:49:30.867 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction check 2026-03-09T17:49:30.879 INFO:teuthology.orchestra.run.vm06.stdout:Transaction check succeeded. 2026-03-09T17:49:30.892 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction test 2026-03-09T17:49:30.914 INFO:teuthology.orchestra.run.vm09.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 2.7 MB/s | 837 kB 00:00 2026-03-09T17:49:30.914 INFO:teuthology.orchestra.run.vm09.stdout:-------------------------------------------------------------------------------- 2026-03-09T17:49:30.914 INFO:teuthology.orchestra.run.vm09.stdout:Total 2.4 MB/s | 2.3 MB 00:00 2026-03-09T17:49:30.952 INFO:teuthology.orchestra.run.vm06.stdout:Transaction test succeeded. 2026-03-09T17:49:30.952 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction 2026-03-09T17:49:30.985 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction check 2026-03-09T17:49:30.994 INFO:teuthology.orchestra.run.vm09.stdout:Transaction check succeeded. 2026-03-09T17:49:30.994 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction test 2026-03-09T17:49:31.058 INFO:teuthology.orchestra.run.vm09.stdout:Transaction test succeeded. 
2026-03-09T17:49:31.058 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction 2026-03-09T17:49:31.169 INFO:teuthology.orchestra.run.vm06.stdout: Preparing : 1/1 2026-03-09T17:49:31.184 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6 2026-03-09T17:49:31.197 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6 2026-03-09T17:49:31.204 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-09T17:49:31.216 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-09T17:49:31.217 INFO:teuthology.orchestra.run.vm06.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6 2026-03-09T17:49:31.241 INFO:teuthology.orchestra.run.vm09.stdout: Preparing : 1/1 2026-03-09T17:49:31.254 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6 2026-03-09T17:49:31.268 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6 2026-03-09T17:49:31.276 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-09T17:49:31.286 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-09T17:49:31.289 INFO:teuthology.orchestra.run.vm09.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6 2026-03-09T17:49:31.407 INFO:teuthology.orchestra.run.vm06.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6 2026-03-09T17:49:31.411 INFO:teuthology.orchestra.run.vm06.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-09T17:49:31.469 INFO:teuthology.orchestra.run.vm09.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6 2026-03-09T17:49:31.487 INFO:teuthology.orchestra.run.vm09.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-09T17:49:31.807 INFO:teuthology.orchestra.run.vm06.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-09T17:49:31.807 INFO:teuthology.orchestra.run.vm06.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 2026-03-09T17:49:31.807 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:49:31.857 INFO:teuthology.orchestra.run.vm09.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-09T17:49:31.857 INFO:teuthology.orchestra.run.vm09.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 
2026-03-09T17:49:31.857 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:49:32.426 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6 2026-03-09T17:49:32.426 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6 2026-03-09T17:49:32.426 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-09T17:49:32.426 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-09T17:49:32.426 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6 2026-03-09T17:49:32.432 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6 2026-03-09T17:49:32.432 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6 2026-03-09T17:49:32.433 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-09T17:49:32.433 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-09T17:49:32.433 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6 2026-03-09T17:49:32.527 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6 2026-03-09T17:49:32.527 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:49:32.527 INFO:teuthology.orchestra.run.vm06.stdout:Installed: 2026-03-09T17:49:32.527 INFO:teuthology.orchestra.run.vm06.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-09T17:49:32.527 INFO:teuthology.orchestra.run.vm06.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-09T17:49:32.527 INFO:teuthology.orchestra.run.vm06.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-09T17:49:32.527 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:49:32.527 INFO:teuthology.orchestra.run.vm06.stdout:Complete! 2026-03-09T17:49:32.533 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6 2026-03-09T17:49:32.533 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:49:32.533 INFO:teuthology.orchestra.run.vm09.stdout:Installed: 2026-03-09T17:49:32.533 INFO:teuthology.orchestra.run.vm09.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-09T17:49:32.533 INFO:teuthology.orchestra.run.vm09.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-09T17:49:32.533 INFO:teuthology.orchestra.run.vm09.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-09T17:49:32.533 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:49:32.533 INFO:teuthology.orchestra.run.vm09.stdout:Complete! 2026-03-09T17:49:32.605 DEBUG:teuthology.parallel:result is None 2026-03-09T17:49:32.607 DEBUG:teuthology.parallel:result is None 2026-03-09T17:49:32.607 INFO:teuthology.run_tasks:Running task nvme_loop... 2026-03-09T17:49:32.610 INFO:tasks.nvme_loop:Setting up nvme_loop on scratch devices... 2026-03-09T17:49:32.610 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:49:32.610 DEBUG:teuthology.orchestra.run.vm06:> dd if=/scratch_devs of=/dev/stdout 2026-03-09T17:49:32.633 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T17:49:32.633 DEBUG:teuthology.orchestra.run.vm06:> ls /dev/[sv]d? 
2026-03-09T17:49:32.697 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vda 2026-03-09T17:49:32.697 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdb 2026-03-09T17:49:32.697 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdc 2026-03-09T17:49:32.697 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdd 2026-03-09T17:49:32.697 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vde 2026-03-09T17:49:32.698 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-09T17:49:32.698 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-09T17:49:32.698 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdb 2026-03-09T17:49:32.767 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdb 2026-03-09T17:49:32.767 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:49:32.767 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-09T17:49:32.767 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:49:32.767 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:49:32.767 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:49:31.897856925 +0000 2026-03-09T17:49:32.767 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:49:31.897856925 +0000 2026-03-09T17:49:32.767 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:49:31.897856925 +0000 2026-03-09T17:49:32.767 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 17:45:41.251000000 +0000 2026-03-09T17:49:32.769 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-09T17:49:32.848 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T17:49:32.848 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T17:49:32.848 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000176532 s, 2.9 MB/s 2026-03-09T17:49:32.849 DEBUG:teuthology.orchestra.run.vm06:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-09T17:49:32.913 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdc 2026-03-09T17:49:32.977 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdc 2026-03-09T17:49:32.977 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:49:32.977 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-09T17:49:32.977 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:49:32.977 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:49:32.977 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:49:31.891856919 +0000 2026-03-09T17:49:32.977 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:49:31.891856919 +0000 2026-03-09T17:49:32.977 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:49:31.891856919 +0000 2026-03-09T17:49:32.977 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 17:45:41.260000000 +0000 2026-03-09T17:49:32.977 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-09T17:49:33.047 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T17:49:33.047 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T17:49:33.047 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000120375 s, 4.3 MB/s 2026-03-09T17:49:33.048 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-09T17:49:33.112 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdd 2026-03-09T17:49:33.176 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdd 2026-03-09T17:49:33.176 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:49:33.176 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-09T17:49:33.176 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:49:33.176 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:49:33.176 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:49:31.899856927 +0000 2026-03-09T17:49:33.176 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:49:31.899856927 +0000 2026-03-09T17:49:33.176 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:49:31.899856927 +0000 2026-03-09T17:49:33.176 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 17:45:41.264000000 +0000 2026-03-09T17:49:33.176 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-09T17:49:33.247 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T17:49:33.247 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T17:49:33.247 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000186138 s, 2.8 MB/s 2026-03-09T17:49:33.248 DEBUG:teuthology.orchestra.run.vm06:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-09T17:49:33.317 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vde 2026-03-09T17:49:33.382 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vde 2026-03-09T17:49:33.382 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:49:33.382 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-09T17:49:33.382 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:49:33.382 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:49:33.382 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:49:31.893856921 +0000 2026-03-09T17:49:33.382 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:49:31.893856921 +0000 2026-03-09T17:49:33.382 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:49:31.893856921 +0000 2026-03-09T17:49:33.382 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 17:45:41.302000000 +0000 2026-03-09T17:49:33.382 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-09T17:49:33.452 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T17:49:33.452 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T17:49:33.452 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000134863 s, 3.8 MB/s 2026-03-09T17:49:33.453 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-09T17:49:33.518 DEBUG:teuthology.orchestra.run.vm06:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype 2026-03-09T17:49:33.670 INFO:teuthology.orchestra.run.vm06.stdout:loop 2026-03-09T17:49:33.674 INFO:tasks.nvme_loop:Connecting nvme_loop vm06:/dev/vdb... 2026-03-09T17:49:33.674 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdb && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1 && echo -n /dev/vdb | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdb /sys/kernel/config/nvmet/ports/1/subsystems/vdb && sudo nvme connect -t loop -n vdb -q hostnqn 2026-03-09T17:49:33.722 INFO:teuthology.orchestra.run.vm06.stdout:1 2026-03-09T17:49:33.760 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdb1 2026-03-09T17:49:33.795 INFO:teuthology.orchestra.run.vm06.stdout:connecting to device: nvme0 2026-03-09T17:49:33.797 INFO:tasks.nvme_loop:Connecting nvme_loop vm06:/dev/vdc... 
2026-03-09T17:49:33.797 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdc && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1 && echo -n /dev/vdc | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdc /sys/kernel/config/nvmet/ports/1/subsystems/vdc && sudo nvme connect -t loop -n vdc -q hostnqn 2026-03-09T17:49:33.846 INFO:teuthology.orchestra.run.vm06.stdout:1 2026-03-09T17:49:33.890 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdc1 2026-03-09T17:49:33.930 INFO:teuthology.orchestra.run.vm06.stdout:connecting to device: nvme1 2026-03-09T17:49:33.933 INFO:tasks.nvme_loop:Connecting nvme_loop vm06:/dev/vdd... 2026-03-09T17:49:33.933 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdd && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1 && echo -n /dev/vdd | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdd /sys/kernel/config/nvmet/ports/1/subsystems/vdd && sudo nvme connect -t loop -n vdd -q hostnqn 2026-03-09T17:49:33.979 INFO:teuthology.orchestra.run.vm06.stdout:1 2026-03-09T17:49:34.018 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdd1 2026-03-09T17:49:34.053 INFO:teuthology.orchestra.run.vm06.stdout:connecting to device: nvme2 2026-03-09T17:49:34.056 INFO:tasks.nvme_loop:Connecting nvme_loop vm06:/dev/vde... 
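Before the last device (vde) is wired up below, the pattern is worth spelling out: after the one-time port setup (modprobe nvme_loop, the hostnqn entry, and nvmet port 1 with addr_trtype=loop, shown further up), the task repeats the same configfs sequence per scratch device. Generalized over the device name, the commands logged for vdb/vdc/vdd boil down to this sketch:

dev=vdb                                           # likewise vdc, vdd, vde
sub=/sys/kernel/config/nvmet/subsystems/$dev
sudo mkdir -p $sub
echo 1 | sudo tee $sub/attr_allow_any_host        # accept any host NQN
sudo mkdir -p $sub/namespaces/1
echo -n /dev/$dev | sudo tee $sub/namespaces/1/device_path
echo 1 | sudo tee $sub/namespaces/1/enable
sudo ln -s $sub /sys/kernel/config/nvmet/ports/1/subsystems/$dev   # expose on the loop port
sudo nvme connect -t loop -n $dev -q hostnqn      # shows up as /dev/nvmeXn1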
2026-03-09T17:49:34.056 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vde && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vde/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vde/namespaces/1 && echo -n /dev/vde | sudo tee /sys/kernel/config/nvmet/subsystems/vde/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vde/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vde /sys/kernel/config/nvmet/ports/1/subsystems/vde && sudo nvme connect -t loop -n vde -q hostnqn 2026-03-09T17:49:34.099 INFO:teuthology.orchestra.run.vm06.stdout:1 2026-03-09T17:49:34.137 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vde1 2026-03-09T17:49:34.173 INFO:teuthology.orchestra.run.vm06.stdout:connecting to device: nvme3 2026-03-09T17:49:34.181 DEBUG:teuthology.orchestra.run.vm06:> lsblk 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:sr0 11:0 1 366K 0 rom 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:vda 252:0 0 40G 0 disk 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:└─vda1 252:1 0 40G 0 part / 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:vdb 252:16 0 20G 0 disk 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:vdc 252:32 0 20G 0 disk 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:vdd 252:48 0 20G 0 disk 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:vde 252:64 0 20G 0 disk 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:nvme0n1 259:1 0 20G 0 disk 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:nvme1n1 259:2 0 20G 0 disk 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:nvme2n1 259:5 0 20G 0 disk 2026-03-09T17:49:34.204 INFO:teuthology.orchestra.run.vm06.stdout:nvme3n1 259:7 0 20G 0 disk 2026-03-09T17:49:34.204 DEBUG:teuthology.orchestra.run.vm06:> sudo nvme list -o json 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout:{ 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "Devices":[ 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: { 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "NameSpace":1, 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "DevicePath":"/dev/nvme0n1", 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "GenericPath":"/dev/ng0n1", 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "Firmware":"5.14.0-6", 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "ModelNumber":"Linux", 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "SerialNumber":"841c956692c72d757122", 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "UsedBytes":21474836480, 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "MaximumLBA":41943040, 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "PhysicalSize":21474836480, 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "SectorSize":512 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: }, 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: { 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "NameSpace":1, 2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "DevicePath":"/dev/nvme1n1", 
2026-03-09T17:49:34.280 INFO:teuthology.orchestra.run.vm06.stdout: "GenericPath":"/dev/ng1n1", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "Firmware":"5.14.0-6", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "ModelNumber":"Linux", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "SerialNumber":"6369ab35119912cda79b", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "UsedBytes":21474836480, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "MaximumLBA":41943040, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "PhysicalSize":21474836480, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "SectorSize":512 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: }, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: { 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "NameSpace":1, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "DevicePath":"/dev/nvme2n1", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "GenericPath":"/dev/ng2n1", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "Firmware":"5.14.0-6", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "ModelNumber":"Linux", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "SerialNumber":"eebfdeda674d069af3bb", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "UsedBytes":21474836480, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "MaximumLBA":41943040, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "PhysicalSize":21474836480, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "SectorSize":512 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: }, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: { 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "NameSpace":1, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "DevicePath":"/dev/nvme3n1", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "GenericPath":"/dev/ng3n1", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "Firmware":"5.14.0-6", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "ModelNumber":"Linux", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "SerialNumber":"0807911388adb67b2a22", 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "UsedBytes":21474836480, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "MaximumLBA":41943040, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "PhysicalSize":21474836480, 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: "SectorSize":512 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: } 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout: ] 2026-03-09T17:49:34.281 INFO:teuthology.orchestra.run.vm06.stdout:} 2026-03-09T17:49:34.281 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096 2026-03-09T17:49:34.353 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:34.353 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:34.353 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00384589 s, 1.1 MB/s 2026-03-09T17:49:34.355 DEBUG:teuthology.orchestra.run.vm06:> sudo 
hexdump -n22 -C -s0 /dev/nvme0n1 2026-03-09T17:49:34.427 INFO:teuthology.orchestra.run.vm06.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:34.427 INFO:teuthology.orchestra.run.vm06.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:34.427 INFO:teuthology.orchestra.run.vm06.stdout:00000016 2026-03-09T17:49:34.428 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096 2026-03-09T17:49:34.504 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:34.504 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:34.504 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.005505 s, 744 kB/s 2026-03-09T17:49:34.506 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1 2026-03-09T17:49:34.576 INFO:teuthology.orchestra.run.vm06.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:34.576 INFO:teuthology.orchestra.run.vm06.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:34.576 INFO:teuthology.orchestra.run.vm06.stdout:40000016 2026-03-09T17:49:34.577 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096 2026-03-09T17:49:34.651 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:34.651 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:34.651 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00672888 s, 609 kB/s 2026-03-09T17:49:34.652 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1 2026-03-09T17:49:34.726 INFO:teuthology.orchestra.run.vm06.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:34.726 INFO:teuthology.orchestra.run.vm06.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:34.726 INFO:teuthology.orchestra.run.vm06.stdout:280000016 2026-03-09T17:49:34.727 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096 2026-03-09T17:49:34.801 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:34.801 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:34.801 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00613838 s, 667 kB/s 2026-03-09T17:49:34.803 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s0 /dev/nvme1n1 2026-03-09T17:49:34.872 INFO:teuthology.orchestra.run.vm06.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:34.872 INFO:teuthology.orchestra.run.vm06.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:34.872 INFO:teuthology.orchestra.run.vm06.stdout:00000016 2026-03-09T17:49:34.873 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096 2026-03-09T17:49:34.945 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:34.945 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:34.945 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00413001 s, 992 kB/s 2026-03-09T17:49:34.946 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-03-09T17:49:35.021 INFO:teuthology.orchestra.run.vm06.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
00 |................| 2026-03-09T17:49:35.022 INFO:teuthology.orchestra.run.vm06.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:35.022 INFO:teuthology.orchestra.run.vm06.stdout:40000016 2026-03-09T17:49:35.023 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-03-09T17:49:35.099 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:35.099 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:35.099 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0039233 s, 1.0 MB/s 2026-03-09T17:49:35.101 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-03-09T17:49:35.173 INFO:teuthology.orchestra.run.vm06.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:35.173 INFO:teuthology.orchestra.run.vm06.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:35.173 INFO:teuthology.orchestra.run.vm06.stdout:280000016 2026-03-09T17:49:35.174 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-03-09T17:49:35.252 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:35.252 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:35.252 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00434096 s, 944 kB/s 2026-03-09T17:49:35.253 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-03-09T17:49:35.333 INFO:teuthology.orchestra.run.vm06.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:35.333 INFO:teuthology.orchestra.run.vm06.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:35.333 INFO:teuthology.orchestra.run.vm06.stdout:00000016 2026-03-09T17:49:35.334 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-03-09T17:49:35.409 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:35.409 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:35.409 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00488518 s, 838 kB/s 2026-03-09T17:49:35.417 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-03-09T17:49:35.494 INFO:teuthology.orchestra.run.vm06.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:35.494 INFO:teuthology.orchestra.run.vm06.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:35.494 INFO:teuthology.orchestra.run.vm06.stdout:40000016 2026-03-09T17:49:35.495 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-03-09T17:49:35.576 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:35.576 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:35.576 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00349987 s, 1.2 MB/s 2026-03-09T17:49:35.577 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1 2026-03-09T17:49:35.656 INFO:teuthology.orchestra.run.vm06.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:35.656 INFO:teuthology.orchestra.run.vm06.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:35.656 
INFO:teuthology.orchestra.run.vm06.stdout:280000016 2026-03-09T17:49:35.660 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096 2026-03-09T17:49:35.739 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:35.739 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:35.739 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00376616 s, 1.1 MB/s 2026-03-09T17:49:35.740 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s0 /dev/nvme3n1 2026-03-09T17:49:35.814 INFO:teuthology.orchestra.run.vm06.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:35.814 INFO:teuthology.orchestra.run.vm06.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:35.814 INFO:teuthology.orchestra.run.vm06.stdout:00000016 2026-03-09T17:49:35.815 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096 2026-03-09T17:49:35.895 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:35.895 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:35.895 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00581665 s, 704 kB/s 2026-03-09T17:49:35.897 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1 2026-03-09T17:49:35.975 INFO:teuthology.orchestra.run.vm06.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:35.975 INFO:teuthology.orchestra.run.vm06.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:35.975 INFO:teuthology.orchestra.run.vm06.stdout:40000016 2026-03-09T17:49:35.976 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096 2026-03-09T17:49:36.054 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-03-09T17:49:36.054 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-03-09T17:49:36.054 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00677049 s, 605 kB/s 2026-03-09T17:49:36.056 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1 2026-03-09T17:49:36.128 INFO:teuthology.orchestra.run.vm06.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:36.128 INFO:teuthology.orchestra.run.vm06.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:36.128 INFO:teuthology.orchestra.run.vm06.stdout:280000016 2026-03-09T17:49:36.130 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-03-09T17:49:36.130 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:49:36.130 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/scratch_devs 2026-03-09T17:49:36.210 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T17:49:36.210 DEBUG:teuthology.orchestra.run.vm09:> dd if=/scratch_devs of=/dev/stdout 2026-03-09T17:49:36.236 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T17:49:36.237 DEBUG:teuthology.orchestra.run.vm09:> ls /dev/[sv]d? 
2026-03-09T17:49:36.303 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vda 2026-03-09T17:49:36.304 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdb 2026-03-09T17:49:36.304 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdc 2026-03-09T17:49:36.304 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdd 2026-03-09T17:49:36.304 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vde 2026-03-09T17:49:36.304 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-09T17:49:36.304 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-09T17:49:36.304 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdb 2026-03-09T17:49:36.371 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdb 2026-03-09T17:49:36.371 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:49:36.371 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 223 Links: 1 Device type: fc,10 2026-03-09T17:49:36.371 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:49:36.371 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:49:36.371 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 17:49:31.926218886 +0000 2026-03-09T17:49:36.371 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 17:49:31.926218886 +0000 2026-03-09T17:49:36.371 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 17:49:31.926218886 +0000 2026-03-09T17:49:36.371 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 17:46:17.217000000 +0000 2026-03-09T17:49:36.372 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-09T17:49:36.446 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T17:49:36.446 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T17:49:36.446 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000141381 s, 3.6 MB/s 2026-03-09T17:49:36.457 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-09T17:49:36.518 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdc 2026-03-09T17:49:36.581 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdc 2026-03-09T17:49:36.581 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:49:36.581 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 230 Links: 1 Device type: fc,20 2026-03-09T17:49:36.581 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:49:36.581 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:49:36.581 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 17:49:31.917218877 +0000 2026-03-09T17:49:36.581 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 17:49:31.917218877 +0000 2026-03-09T17:49:36.581 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 17:49:31.917218877 +0000 2026-03-09T17:49:36.581 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 17:46:17.224000000 +0000 2026-03-09T17:49:36.581 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-09T17:49:36.657 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T17:49:36.657 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T17:49:36.657 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000137172 s, 3.7 MB/s 2026-03-09T17:49:36.658 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-09T17:49:36.722 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdd 2026-03-09T17:49:36.786 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdd 2026-03-09T17:49:36.786 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:49:36.786 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-09T17:49:36.786 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:49:36.786 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:49:36.786 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 17:49:31.936218897 +0000 2026-03-09T17:49:36.786 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 17:49:31.936218897 +0000 2026-03-09T17:49:36.786 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 17:49:31.936218897 +0000 2026-03-09T17:49:36.786 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 17:46:17.232000000 +0000 2026-03-09T17:49:36.787 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-09T17:49:36.853 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T17:49:36.853 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T17:49:36.853 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000204481 s, 2.5 MB/s 2026-03-09T17:49:36.858 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-09T17:49:36.922 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vde 2026-03-09T17:49:36.985 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vde 2026-03-09T17:49:36.985 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:49:36.985 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-09T17:49:36.985 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:49:36.985 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:49:36.985 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 17:49:31.920218880 +0000 2026-03-09T17:49:36.985 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 17:49:31.920218880 +0000 2026-03-09T17:49:36.985 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 17:49:31.920218880 +0000 2026-03-09T17:49:36.985 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 17:46:17.244000000 +0000 2026-03-09T17:49:36.985 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-09T17:49:37.053 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T17:49:37.053 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T17:49:37.053 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000195753 s, 2.6 MB/s 2026-03-09T17:49:37.055 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-09T17:49:37.124 DEBUG:teuthology.orchestra.run.vm09:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype 2026-03-09T17:49:37.270 INFO:teuthology.orchestra.run.vm09.stdout:loop 2026-03-09T17:49:37.271 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vdb... 2026-03-09T17:49:37.271 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdb && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1 && echo -n /dev/vdb | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdb /sys/kernel/config/nvmet/ports/1/subsystems/vdb && sudo nvme connect -t loop -n vdb -q hostnqn 2026-03-09T17:49:37.317 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-03-09T17:49:37.354 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdb1 2026-03-09T17:49:37.391 INFO:teuthology.orchestra.run.vm09.stdout:connecting to device: nvme0 2026-03-09T17:49:37.393 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vdc... 
2026-03-09T17:49:37.393 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdc && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1 && echo -n /dev/vdc | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdc /sys/kernel/config/nvmet/ports/1/subsystems/vdc && sudo nvme connect -t loop -n vdc -q hostnqn 2026-03-09T17:49:37.442 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-03-09T17:49:37.479 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdc1 2026-03-09T17:49:37.511 INFO:teuthology.orchestra.run.vm09.stdout:connecting to device: nvme1 2026-03-09T17:49:37.512 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vdd... 2026-03-09T17:49:37.512 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdd && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1 && echo -n /dev/vdd | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdd /sys/kernel/config/nvmet/ports/1/subsystems/vdd && sudo nvme connect -t loop -n vdd -q hostnqn 2026-03-09T17:49:37.566 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-03-09T17:49:37.601 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdd1 2026-03-09T17:49:37.637 INFO:teuthology.orchestra.run.vm09.stdout:connecting to device: nvme2 2026-03-09T17:49:37.638 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vde... 
2026-03-09T17:49:37.638 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vde && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vde/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vde/namespaces/1 && echo -n /dev/vde | sudo tee /sys/kernel/config/nvmet/subsystems/vde/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vde/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vde /sys/kernel/config/nvmet/ports/1/subsystems/vde && sudo nvme connect -t loop -n vde -q hostnqn 2026-03-09T17:49:37.689 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-03-09T17:49:37.725 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vde1 2026-03-09T17:49:37.758 INFO:teuthology.orchestra.run.vm09.stdout:connecting to device: nvme3 2026-03-09T17:49:37.765 DEBUG:teuthology.orchestra.run.vm09:> lsblk 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:sr0 11:0 1 366K 0 rom 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:vda 252:0 0 40G 0 disk 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:└─vda1 252:1 0 40G 0 part / 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:vdb 252:16 0 20G 0 disk 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:vdc 252:32 0 20G 0 disk 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:vdd 252:48 0 20G 0 disk 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:vde 252:64 0 20G 0 disk 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:nvme0n1 259:1 0 20G 0 disk 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:nvme1n1 259:3 0 20G 0 disk 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:nvme2n1 259:5 0 20G 0 disk 2026-03-09T17:49:37.790 INFO:teuthology.orchestra.run.vm09.stdout:nvme3n1 259:7 0 20G 0 disk 2026-03-09T17:49:37.791 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme list -o json 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout:{ 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "Devices":[ 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace":1, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath":"/dev/nvme0n1", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "GenericPath":"/dev/ng0n1", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware":"5.14.0-6", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber":"Linux", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber":"1306f41d212284cf600a", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes":21474836480, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA":41943040, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize":21474836480, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize":512 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: }, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace":1, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath":"/dev/nvme1n1", 
2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "GenericPath":"/dev/ng1n1", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware":"5.14.0-6", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber":"Linux", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber":"c5ac994bece5cd285c2d", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes":21474836480, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA":41943040, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize":21474836480, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize":512 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: }, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace":1, 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath":"/dev/nvme2n1", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "GenericPath":"/dev/ng2n1", 2026-03-09T17:49:37.860 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware":"5.14.0-6", 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber":"Linux", 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber":"a67ce17ff328133e3623", 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes":21474836480, 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA":41943040, 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize":21474836480, 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize":512 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: }, 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace":1, 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath":"/dev/nvme3n1", 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "GenericPath":"/dev/ng3n1", 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware":"5.14.0-6", 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber":"Linux", 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber":"c16408162be630ff3c6d", 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes":21474836480, 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA":41943040, 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize":21474836480, 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize":512 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: } 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout: ] 2026-03-09T17:49:37.861 INFO:teuthology.orchestra.run.vm09.stdout:} 2026-03-09T17:49:37.862 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096 2026-03-09T17:49:37.938 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:37.938 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:37.938 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00433049 s, 946 kB/s 2026-03-09T17:49:37.939 DEBUG:teuthology.orchestra.run.vm09:> sudo 
hexdump -n22 -C -s0 /dev/nvme0n1 2026-03-09T17:49:38.017 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:38.017 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:38.017 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-03-09T17:49:38.018 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096 2026-03-09T17:49:38.094 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:38.094 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:38.094 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00475424 s, 862 kB/s 2026-03-09T17:49:38.095 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1 2026-03-09T17:49:38.168 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:38.168 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:38.168 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-03-09T17:49:38.169 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096 2026-03-09T17:49:38.244 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:38.244 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:38.244 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00363884 s, 1.1 MB/s 2026-03-09T17:49:38.245 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1 2026-03-09T17:49:38.316 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:38.316 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:38.316 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-03-09T17:49:38.317 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096 2026-03-09T17:49:38.392 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:38.393 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:38.393 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00521399 s, 786 kB/s 2026-03-09T17:49:38.394 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s0 /dev/nvme1n1 2026-03-09T17:49:38.466 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:38.466 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:38.466 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-03-09T17:49:38.467 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096 2026-03-09T17:49:38.541 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:38.541 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:38.541 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00456602 s, 897 kB/s 2026-03-09T17:49:38.542 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-03-09T17:49:38.610 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
00 00 |................| 2026-03-09T17:49:38.610 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:38.610 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-03-09T17:49:38.611 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-03-09T17:49:38.683 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:38.683 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:38.683 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0048082 s, 852 kB/s 2026-03-09T17:49:38.688 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-03-09T17:49:38.754 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:38.754 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:38.754 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-03-09T17:49:38.755 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-03-09T17:49:38.829 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:38.829 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:38.829 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00377149 s, 1.1 MB/s 2026-03-09T17:49:38.830 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-03-09T17:49:38.902 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:38.902 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:38.902 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-03-09T17:49:38.903 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-03-09T17:49:38.987 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:38.987 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:38.987 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00429545 s, 954 kB/s 2026-03-09T17:49:38.995 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-03-09T17:49:39.026 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:39.027 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:39.027 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-03-09T17:49:39.028 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-03-09T17:49:39.116 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:39.116 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:39.116 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00562639 s, 728 kB/s 2026-03-09T17:49:39.120 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1 2026-03-09T17:49:39.151 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:39.151 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 
2026-03-09T17:49:39.151 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-03-09T17:49:39.152 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096 2026-03-09T17:49:39.235 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:39.236 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:39.236 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00928714 s, 441 kB/s 2026-03-09T17:49:39.240 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s0 /dev/nvme3n1 2026-03-09T17:49:39.311 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:39.311 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:39.311 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-03-09T17:49:39.312 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096 2026-03-09T17:49:39.390 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:39.390 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:39.390 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00690369 s, 593 kB/s 2026-03-09T17:49:39.391 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1 2026-03-09T17:49:39.466 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:39.466 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:39.466 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-03-09T17:49:39.467 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096 2026-03-09T17:49:39.546 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-09T17:49:39.547 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-09T17:49:39.547 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00518871 s, 789 kB/s 2026-03-09T17:49:39.554 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1 2026-03-09T17:49:39.621 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-09T17:49:39.621 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-09T17:49:39.621 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-03-09T17:49:39.623 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-03-09T17:49:39.623 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T17:49:39.623 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/scratch_devs 2026-03-09T17:49:39.695 INFO:teuthology.run_tasks:Running task cephadm... 
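Note on the nvme_loop phase that ends here: on each host the task exports every scratch disk (vdb-vde) as an NVMe namespace over the kernel's loop transport via configfs and then attaches it locally, which is why four new /dev/nvmeXn1 devices appear in lsblk before the wipe checks. A condensed sketch of the per-device sequence logged above (the loop and the dev variable are illustrative only; paths and hostnqn are as in the log):

    # Sketch of the per-device nvme_loop export/attach seen above on vm06 and vm09.
    for dev in vdb vdc vdd vde; do
      sudo mkdir -p /sys/kernel/config/nvmet/subsystems/$dev
      echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/$dev/attr_allow_any_host
      sudo mkdir -p /sys/kernel/config/nvmet/subsystems/$dev/namespaces/1
      echo -n /dev/$dev | sudo tee /sys/kernel/config/nvmet/subsystems/$dev/namespaces/1/device_path
      echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/$dev/namespaces/1/enable
      sudo ln -s /sys/kernel/config/nvmet/subsystems/$dev /sys/kernel/config/nvmet/ports/1/subsystems/$dev
      sudo nvme connect -t loop -n $dev -q hostnqn   # the namespace then surfaces as /dev/nvmeXn1
    done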
2026-03-09T17:49:39.748 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_DAEMON_PLACE_FAIL', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'} 2026-03-09T17:49:39.748 INFO:tasks.cephadm:Cluster image is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T17:49:39.748 INFO:tasks.cephadm:Cluster fsid is 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:49:39.748 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-03-09T17:49:39.748 INFO:tasks.cephadm:No mon roles; fabricating mons 2026-03-09T17:49:39.748 INFO:tasks.cephadm:Monitor IPs: {'mon.vm06': '192.168.123.106', 'mon.vm09': '192.168.123.109'} 2026-03-09T17:49:39.748 INFO:tasks.cephadm:Normalizing hostnames... 2026-03-09T17:49:39.748 DEBUG:teuthology.orchestra.run.vm06:> sudo hostname $(hostname -s) 2026-03-09T17:49:39.775 DEBUG:teuthology.orchestra.run.vm09:> sudo hostname $(hostname -s) 2026-03-09T17:49:39.809 INFO:tasks.cephadm:Downloading "compiled" cephadm from cachra 2026-03-09T17:49:39.809 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T17:49:40.509 INFO:tasks.cephadm:builder_project result: [{'url': 'https://3.chacra.ceph.com/r/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/flavors/default/', 'chacra_url': 'https://3.chacra.ceph.com/repos/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/flavors/default/', 'ref': 'squid', 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df', 'distro': 'centos', 'distro_version': '9', 'distro_codename': None, 'modified': '2026-02-25 18:55:15.146628', 'status': 'ready', 'flavor': 'default', 'project': 'ceph', 'archs': ['source', 'x86_64'], 'extra': {'version': '19.2.3-678-ge911bdeb', 'package_manager_version': '19.2.3-678.ge911bdeb', 'build_url': 'https://jenkins.ceph.com/job/ceph-dev-pipeline/3275/', 'root_build_cause': '', 'node_name': '10.20.192.26+soko16', 'job_name': 'ceph-dev-pipeline'}}] 2026-03-09T17:49:41.093 INFO:tasks.util.chacra:got chacra host 3.chacra.ceph.com, ref squid, sha1 e911bdebe5c8faa3800735d1568fcdca65db60df from https://shaman.ceph.com/api/search/?project=ceph&distros=centos%2F9%2Fx86_64&flavor=default&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T17:49:41.094 INFO:tasks.cephadm:Discovered cachra url: https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm 2026-03-09T17:49:41.094 INFO:tasks.cephadm:Downloading cephadm from url: https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm 2026-03-09T17:49:41.095 DEBUG:teuthology.orchestra.run.vm06:> curl --silent -L https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-09T17:49:42.640 INFO:teuthology.orchestra.run.vm06.stdout:-rw-r--r--. 
1 ubuntu ubuntu 788355 Mar 9 17:49 /home/ubuntu/cephtest/cephadm 2026-03-09T17:49:42.640 DEBUG:teuthology.orchestra.run.vm09:> curl --silent -L https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-09T17:49:44.186 INFO:teuthology.orchestra.run.vm09.stdout:-rw-r--r--. 1 ubuntu ubuntu 788355 Mar 9 17:49 /home/ubuntu/cephtest/cephadm 2026-03-09T17:49:44.186 DEBUG:teuthology.orchestra.run.vm06:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-09T17:49:44.207 DEBUG:teuthology.orchestra.run.vm09:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-09T17:49:44.235 INFO:tasks.cephadm:Pulling image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on all hosts... 2026-03-09T17:49:44.235 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull 2026-03-09T17:49:44.252 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull 2026-03-09T17:49:44.445 INFO:teuthology.orchestra.run.vm06.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-09T17:49:44.480 INFO:teuthology.orchestra.run.vm09.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-09T17:50:21.179 INFO:teuthology.orchestra.run.vm06.stdout:{ 2026-03-09T17:50:21.179 INFO:teuthology.orchestra.run.vm06.stdout: "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)", 2026-03-09T17:50:21.179 INFO:teuthology.orchestra.run.vm06.stdout: "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", 2026-03-09T17:50:21.179 INFO:teuthology.orchestra.run.vm06.stdout: "repo_digests": [ 2026-03-09T17:50:21.179 INFO:teuthology.orchestra.run.vm06.stdout: "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc" 2026-03-09T17:50:21.179 INFO:teuthology.orchestra.run.vm06.stdout: ] 2026-03-09T17:50:21.179 INFO:teuthology.orchestra.run.vm06.stdout:} 2026-03-09T17:50:22.209 INFO:teuthology.orchestra.run.vm09.stdout:{ 2026-03-09T17:50:22.209 INFO:teuthology.orchestra.run.vm09.stdout: "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)", 2026-03-09T17:50:22.209 INFO:teuthology.orchestra.run.vm09.stdout: "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", 2026-03-09T17:50:22.209 INFO:teuthology.orchestra.run.vm09.stdout: "repo_digests": [ 2026-03-09T17:50:22.209 INFO:teuthology.orchestra.run.vm09.stdout: "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc" 2026-03-09T17:50:22.209 INFO:teuthology.orchestra.run.vm09.stdout: ] 2026-03-09T17:50:22.209 INFO:teuthology.orchestra.run.vm09.stdout:} 2026-03-09T17:50:22.227 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /etc/ceph 2026-03-09T17:50:22.262 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /etc/ceph 2026-03-09T17:50:22.295 DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 777 /etc/ceph 2026-03-09T17:50:22.330 
DEBUG:teuthology.orchestra.run.vm09:> sudo chmod 777 /etc/ceph 2026-03-09T17:50:22.365 INFO:tasks.cephadm:Writing seed config... 2026-03-09T17:50:22.366 INFO:tasks.cephadm: override: [mgr] debug mgr = 20 2026-03-09T17:50:22.366 INFO:tasks.cephadm: override: [mgr] debug ms = 1 2026-03-09T17:50:22.366 INFO:tasks.cephadm: override: [mon] debug mon = 20 2026-03-09T17:50:22.366 INFO:tasks.cephadm: override: [mon] debug ms = 1 2026-03-09T17:50:22.366 INFO:tasks.cephadm: override: [mon] debug paxos = 20 2026-03-09T17:50:22.366 INFO:tasks.cephadm: override: [osd] debug ms = 1 2026-03-09T17:50:22.366 INFO:tasks.cephadm: override: [osd] debug osd = 20 2026-03-09T17:50:22.366 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000 2026-03-09T17:50:22.366 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True 2026-03-09T17:50:22.366 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:50:22.366 DEBUG:teuthology.orchestra.run.vm06:> dd of=/home/ubuntu/cephtest/seed.ceph.conf 2026-03-09T17:50:22.391 DEBUG:tasks.cephadm:Final config: [global] # make logging friendly to teuthology log_to_file = true log_to_stderr = false log to journald = false mon cluster log to file = true mon cluster log file level = debug mon clock drift allowed = 1.000 # replicate across OSDs, not hosts osd crush chooseleaf type = 0 #osd pool default size = 2 osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd # enable some debugging auth debug = true ms die on old message = true ms die on bug = true debug asserts on shutdown = true # adjust warnings mon max pg per osd = 10000# >= luminous mon pg warn max object skew = 0 mon osd allow primary affinity = true mon osd allow pg remap = true mon warn on legacy crush tunables = false mon warn on crush straw calc version zero = false mon warn on no sortbitwise = false mon warn on osd down out interval zero = false mon warn on too few osds = false mon_warn_on_pool_pg_num_not_power_of_two = false # disable pg_autoscaler by default for new pools osd_pool_default_pg_autoscale_mode = off # tests delete pools mon allow pool delete = true fsid = 588d7312-1be0-11f1-b5b6-61233c7d7c44 [osd] osd scrub load threshold = 5.0 osd scrub max interval = 600 osd mclock profile = high_recovery_ops osd recover clone overlap = true osd recovery max chunk = 1048576 osd deep scrub update digest min age = 30 osd map max advance = 10 osd memory target autotune = true # debugging osd debug shutdown = true osd debug op order = true osd debug verify stray on activate = true osd debug pg log writeout = true osd debug verify cached snaps = true osd debug verify missing on start = true osd debug misdirected ops = true osd op queue = debug_random osd op queue cut off = debug_random osd shutdown pgref assert = True bdev debug aio = true osd sloppy crc = true debug ms = 1 debug osd = 20 osd mclock iops capacity threshold hdd = 49000 [mgr] mon reweight min pgs per osd = 4 mon reweight min bytes per osd = 10 mgr/telemetry/nag = false debug mgr = 20 debug ms = 1 [mon] mon data avail warn = 5 mon mgr mkfs grace = 240 mon reweight min pgs per osd = 4 mon osd reporter subtree level = osd mon osd prime pg temp = true mon reweight min bytes per osd = 10 # rotate auth tickets quickly to exercise renewal paths auth mon ticket ttl = 660# 11m auth service ticket ttl = 240# 4m # don't complain about global id reclaim mon_warn_on_insecure_global_id_reclaim = false mon_warn_on_insecure_global_id_reclaim_allowed = false debug mon = 
20 debug ms = 1 debug paxos = 20 [client.rgw] rgw cache enabled = true rgw enable ops log = true rgw enable usage log = true 2026-03-09T17:50:22.392 DEBUG:teuthology.orchestra.run.vm06:mon.vm06> sudo journalctl -f -n 0 -u ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mon.vm06.service 2026-03-09T17:50:22.433 INFO:tasks.cephadm:Bootstrapping... 2026-03-09T17:50:22.433 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df -v bootstrap --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.106 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring 2026-03-09T17:50:22.604 INFO:teuthology.orchestra.run.vm06.stdout:-------------------------------------------------------------------------------- 2026-03-09T17:50:22.604 INFO:teuthology.orchestra.run.vm06.stdout:cephadm ['--image', 'quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df', '-v', 'bootstrap', '--fsid', '588d7312-1be0-11f1-b5b6-61233c7d7c44', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.106', '--skip-admin-label'] 2026-03-09T17:50:22.604 INFO:teuthology.orchestra.run.vm06.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts. 2026-03-09T17:50:22.604 INFO:teuthology.orchestra.run.vm06.stdout:Verifying podman|docker is present... 2026-03-09T17:50:22.629 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stdout 5.8.0 2026-03-09T17:50:22.629 INFO:teuthology.orchestra.run.vm06.stdout:Verifying lvm2 is present... 2026-03-09T17:50:22.629 INFO:teuthology.orchestra.run.vm06.stdout:Verifying time synchronization is in place... 2026-03-09T17:50:22.638 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-09T17:50:22.638 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-09T17:50:22.650 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-09T17:50:22.650 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive 2026-03-09T17:50:22.659 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout enabled 2026-03-09T17:50:22.666 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout active 2026-03-09T17:50:22.666 INFO:teuthology.orchestra.run.vm06.stdout:Unit chronyd.service is enabled and running 2026-03-09T17:50:22.666 INFO:teuthology.orchestra.run.vm06.stdout:Repeating the final host check... 
2026-03-09T17:50:22.692 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stdout 5.8.0 2026-03-09T17:50:22.692 INFO:teuthology.orchestra.run.vm06.stdout:podman (/bin/podman) version 5.8.0 is present 2026-03-09T17:50:22.692 INFO:teuthology.orchestra.run.vm06.stdout:systemctl is present 2026-03-09T17:50:22.692 INFO:teuthology.orchestra.run.vm06.stdout:lvcreate is present 2026-03-09T17:50:22.702 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-09T17:50:22.702 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-09T17:50:22.711 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-09T17:50:22.711 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive 2026-03-09T17:50:22.718 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout enabled 2026-03-09T17:50:22.724 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout active 2026-03-09T17:50:22.724 INFO:teuthology.orchestra.run.vm06.stdout:Unit chronyd.service is enabled and running 2026-03-09T17:50:22.724 INFO:teuthology.orchestra.run.vm06.stdout:Host looks OK 2026-03-09T17:50:22.724 INFO:teuthology.orchestra.run.vm06.stdout:Cluster fsid: 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:50:22.724 INFO:teuthology.orchestra.run.vm06.stdout:Acquiring lock 139632331309936 on /run/cephadm/588d7312-1be0-11f1-b5b6-61233c7d7c44.lock 2026-03-09T17:50:22.724 INFO:teuthology.orchestra.run.vm06.stdout:Lock 139632331309936 acquired on /run/cephadm/588d7312-1be0-11f1-b5b6-61233c7d7c44.lock 2026-03-09T17:50:22.724 INFO:teuthology.orchestra.run.vm06.stdout:Verifying IP 192.168.123.106 port 3300 ... 2026-03-09T17:50:22.725 INFO:teuthology.orchestra.run.vm06.stdout:Verifying IP 192.168.123.106 port 6789 ... 
2026-03-09T17:50:22.725 INFO:teuthology.orchestra.run.vm06.stdout:Base mon IP(s) is [192.168.123.106:3300, 192.168.123.106:6789], mon addrv is [v2:192.168.123.106:3300,v1:192.168.123.106:6789] 2026-03-09T17:50:22.729 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.106 metric 100 2026-03-09T17:50:22.729 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.106 metric 100 2026-03-09T17:50:22.732 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium 2026-03-09T17:50:22.732 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-03-09T17:50:22.735 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-03-09T17:50:22.735 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout inet6 ::1/128 scope host 2026-03-09T17:50:22.735 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-09T17:50:22.735 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000 2026-03-09T17:50:22.735 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:6/64 scope link noprefixroute 2026-03-09T17:50:22.735 INFO:teuthology.orchestra.run.vm06.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-09T17:50:22.736 INFO:teuthology.orchestra.run.vm06.stdout:Mon IP `192.168.123.106` is in CIDR network `192.168.123.0/24` 2026-03-09T17:50:22.736 INFO:teuthology.orchestra.run.vm06.stdout:Mon IP `192.168.123.106` is in CIDR network `192.168.123.0/24` 2026-03-09T17:50:22.736 INFO:teuthology.orchestra.run.vm06.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24'] 2026-03-09T17:50:22.736 INFO:teuthology.orchestra.run.vm06.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-03-09T17:50:22.737 INFO:teuthology.orchestra.run.vm06.stdout:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-09T17:50:23.962 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stdout 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 2026-03-09T17:50:23.962 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Trying to pull quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 
2026-03-09T17:50:23.962 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Getting image source signatures 2026-03-09T17:50:23.962 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Copying blob sha256:1752b8d01aa0dd33bbe0ab24e8316174c94fbdcd5d26252e2680bba0624747a7 2026-03-09T17:50:23.962 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Copying blob sha256:8e380faede39ebd4286247457b408d979ab568aafd8389c42ec304b8cfba4e92 2026-03-09T17:50:23.962 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Copying config sha256:654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 2026-03-09T17:50:23.962 INFO:teuthology.orchestra.run.vm06.stdout:/bin/podman: stderr Writing manifest to image destination 2026-03-09T17:50:24.108 INFO:teuthology.orchestra.run.vm06.stdout:ceph: stdout ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable) 2026-03-09T17:50:24.108 INFO:teuthology.orchestra.run.vm06.stdout:Ceph version: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable) 2026-03-09T17:50:24.108 INFO:teuthology.orchestra.run.vm06.stdout:Extracting ceph user uid/gid from container image... 2026-03-09T17:50:24.214 INFO:teuthology.orchestra.run.vm06.stdout:stat: stdout 167 167 2026-03-09T17:50:24.215 INFO:teuthology.orchestra.run.vm06.stdout:Creating initial keys... 2026-03-09T17:50:24.333 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-authtool: stdout AQBgCK9p9wzdERAA88PoPyRdVK3nWijsMguNDA== 2026-03-09T17:50:24.443 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-authtool: stdout AQBgCK9pf5vtGBAAUrwyHMV0CZt1ExzbGvWA9w== 2026-03-09T17:50:24.557 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-authtool: stdout AQBgCK9pn3whIBAAaay47UYV7NJBQXDBf4v9Bg== 2026-03-09T17:50:24.558 INFO:teuthology.orchestra.run.vm06.stdout:Creating initial monmap... 2026-03-09T17:50:24.689 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-09T17:50:24.689 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy 2026-03-09T17:50:24.689 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:50:24.689 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-09T17:50:24.689 INFO:teuthology.orchestra.run.vm06.stdout:monmaptool for vm06 [v2:192.168.123.106:3300,v1:192.168.123.106:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-09T17:50:24.689 INFO:teuthology.orchestra.run.vm06.stdout:setting min_mon_release = quincy 2026-03-09T17:50:24.689 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: set fsid to 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:50:24.689 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-09T17:50:24.690 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:24.690 INFO:teuthology.orchestra.run.vm06.stdout:Creating mon... 2026-03-09T17:50:24.837 INFO:teuthology.orchestra.run.vm06.stdout:create mon.vm06 on 2026-03-09T17:50:25.193 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 
2026-03-09T17:50:25.357 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44.target → /etc/systemd/system/ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44.target. 2026-03-09T17:50:25.358 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44.target → /etc/systemd/system/ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44.target. 2026-03-09T17:50:25.549 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mon.vm06 2026-03-09T17:50:25.549 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to reset failed state of unit ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mon.vm06.service: Unit ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mon.vm06.service not loaded. 2026-03-09T17:50:25.715 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44.target.wants/ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mon.vm06.service → /etc/systemd/system/ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@.service. 2026-03-09T17:50:25.904 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present 2026-03-09T17:50:25.904 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to enable service . firewalld.service is not available 2026-03-09T17:50:25.904 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mon to start... 2026-03-09T17:50:25.904 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mon... 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout cluster: 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout id: 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout health: HEALTH_OK 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout services: 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum vm06 (age 0.141647s) 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mgr: no daemons active 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout data: 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout pgs: 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:mon is available 2026-03-09T17:50:26.114 INFO:teuthology.orchestra.run.vm06.stdout:Assimilating anything we can from ceph.conf... 
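"Assimilating anything we can from ceph.conf" imports the option overrides teuthology placed in the local ceph.conf into the monitor's central config database; the [global]/[mgr]/[osd] dump that follows is the result of that import. From a cephadm shell the same step and its verification look like this (a sketch, assuming the admin keyring is available):

    ceph config assimilate-conf -i /etc/ceph/ceph.conf
    ceph config dump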
2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [global] 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout fsid = 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.106:3300,v1:192.168.123.106:6789] 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [osd] 2026-03-09T17:50:26.332 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-09T17:50:26.333 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-09T17:50:26.333 INFO:teuthology.orchestra.run.vm06.stdout:Generating new minimal ceph.conf... 2026-03-09T17:50:26.549 INFO:teuthology.orchestra.run.vm06.stdout:Restarting the monitor... 2026-03-09T17:50:27.226 INFO:teuthology.orchestra.run.vm06.stdout:Setting public_network to 192.168.123.0/24 in mon config section 2026-03-09T17:50:27.439 INFO:teuthology.orchestra.run.vm06.stdout:Wrote config to /etc/ceph/ceph.conf 2026-03-09T17:50:27.440 INFO:teuthology.orchestra.run.vm06.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-09T17:50:27.440 INFO:teuthology.orchestra.run.vm06.stdout:Creating mgr... 2026-03-09T17:50:27.440 INFO:teuthology.orchestra.run.vm06.stdout:Verifying port 0.0.0.0:9283 ... 2026-03-09T17:50:27.440 INFO:teuthology.orchestra.run.vm06.stdout:Verifying port 0.0.0.0:8765 ... 2026-03-09T17:50:27.441 INFO:teuthology.orchestra.run.vm06.stdout:Verifying port 0.0.0.0:8443 ... 2026-03-09T17:50:27.641 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mgr.vm06.shmhyl 2026-03-09T17:50:27.641 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to reset failed state of unit ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mgr.vm06.shmhyl.service: Unit ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mgr.vm06.shmhyl.service not loaded. 2026-03-09T17:50:27.792 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44.target.wants/ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mgr.vm06.shmhyl.service → /etc/systemd/system/ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@.service. 
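After assimilation, bootstrap rewrites /etc/ceph/ceph.conf as a minimal file (essentially fsid plus mon_host), records public_network in the mon section of the config database, and checks that the mgr's default ports (9283 for the Prometheus exporter, 8765 for service discovery, 8443 for the dashboard) are free before creating the mgr unit. The stored values can be checked afterwards with, for example (a sketch):

    ceph config generate-minimal-conf
    ceph config get mon public_network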
2026-03-09T17:50:27.989 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present 2026-03-09T17:50:27.989 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to enable service . firewalld.service is not available 2026-03-09T17:50:27.989 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present 2026-03-09T17:50:27.989 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available 2026-03-09T17:50:27.989 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr to start... 2026-03-09T17:50:27.989 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr... 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "588d7312-1be0-11f1-b5b6-61233c7d7c44", 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": { 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06" 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 0, 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-09T17:50:28.301 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-09T17:50:28.302 
INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-03-09T17:50:25:949236+0000", 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-09T17:50:28.302 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "restful" 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-03-09T17:50:25.950183+0000", 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: 
stdout } 2026-03-09T17:50:28.303 INFO:teuthology.orchestra.run.vm06.stdout:mgr not available, waiting (1/15)... 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "588d7312-1be0-11f1-b5b6-61233c7d7c44", 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": { 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06" 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 3, 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-09T17:50:30.616 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: 
stdout "num_pools": 0, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-09T17:50:30.617 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-03-09T17:50:25:949236+0000", 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "restful" 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-03-09T17:50:25.950183+0000", 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-03-09T17:50:30.618 INFO:teuthology.orchestra.run.vm06.stdout:mgr not available, waiting (2/15)... 
2026-03-09T17:50:32.894 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:32.894 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "588d7312-1be0-11f1-b5b6-61233c7d7c44", 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": { 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06" 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 5, 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-09T17:50:32.895 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 
"num_objects": 0, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-03-09T17:50:25:949236+0000", 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "restful" 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-03-09T17:50:25.950183+0000", 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-03-09T17:50:32.896 INFO:teuthology.orchestra.run.vm06.stdout:mgr is available 2026-03-09T17:50:33.168 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:33.168 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [global] 2026-03-09T17:50:33.168 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout fsid = 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:50:33.168 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-09T17:50:33.168 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.106:3300,v1:192.168.123.106:6789] 
2026-03-09T17:50:33.168 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-09T17:50:33.168 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-09T17:50:33.168 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-09T17:50:33.169 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-09T17:50:33.169 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:33.169 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-09T17:50:33.169 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-09T17:50:33.169 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-03-09T17:50:33.169 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [osd] 2026-03-09T17:50:33.169 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-09T17:50:33.169 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-09T17:50:33.169 INFO:teuthology.orchestra.run.vm06.stdout:Enabling cephadm module... 2026-03-09T17:50:34.793 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-03-09T17:50:34.793 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 5, 2026-03-09T17:50:34.794 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-09T17:50:34.794 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "active_name": "vm06.shmhyl", 2026-03-09T17:50:34.794 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-09T17:50:34.794 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-03-09T17:50:34.794 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for the mgr to restart... 2026-03-09T17:50:34.794 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr epoch 5... 2026-03-09T17:50:38.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:37 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.shmhyl/mirror_snapshot_schedule"}]: dispatch 2026-03-09T17:50:38.903 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-03-09T17:50:38.903 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7, 2026-03-09T17:50:38.903 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-09T17:50:38.903 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-03-09T17:50:38.903 INFO:teuthology.orchestra.run.vm06.stdout:mgr epoch 5 is available 2026-03-09T17:50:38.903 INFO:teuthology.orchestra.run.vm06.stdout:Setting orchestrator backend to cephadm... 2026-03-09T17:50:39.149 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:38 vm06 ceph-mon[53878]: Found migration_current of "None". Setting to last migration. 
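Enabling the cephadm mgr module forces the active mgr to respawn (hence "Waiting for mgr epoch 5..."), and once the new epoch is in, the orchestrator backend is pointed at it. Done by hand the sequence is:

    ceph mgr module enable cephadm
    ceph orch set backend cephadm
    ceph orch status    # should report the cephadm backend as available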
2026-03-09T17:50:39.149 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:38 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.shmhyl/trash_purge_schedule"}]: dispatch 2026-03-09T17:50:39.149 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:38 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:39.149 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:38 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:39.149 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:38 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:50:39.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:38 vm06 ceph-mon[53878]: mgrmap e7: vm06.shmhyl(active, since 1.0099s) 2026-03-09T17:50:39.452 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout value unchanged 2026-03-09T17:50:39.452 INFO:teuthology.orchestra.run.vm06.stdout:Generating ssh key... 2026-03-09T17:50:40.004 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO2thmtKGKn54oCPVcBLbxTlx7F0bbxjEolM3ZaTwQZe ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:50:40.004 INFO:teuthology.orchestra.run.vm06.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-09T17:50:40.004 INFO:teuthology.orchestra.run.vm06.stdout:Adding key to root@localhost authorized_keys... 2026-03-09T17:50:40.004 INFO:teuthology.orchestra.run.vm06.stdout:Adding host vm06... 2026-03-09T17:50:40.225 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: [09/Mar/2026:17:50:38] ENGINE Bus STARTING 2026-03-09T17:50:40.225 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: [09/Mar/2026:17:50:38] ENGINE Serving on https://192.168.123.106:7150 2026-03-09T17:50:40.225 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: [09/Mar/2026:17:50:38] ENGINE Client ('192.168.123.106', 44366) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T17:50:40.225 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: [09/Mar/2026:17:50:38] ENGINE Serving on http://192.168.123.106:8765 2026-03-09T17:50:40.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: [09/Mar/2026:17:50:38] ENGINE Bus STARTED 2026-03-09T17:50:40.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: from='client.14122 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-09T17:50:40.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: from='client.14122 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-09T17:50:40.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: from='client.14130 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:40.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:40.226 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:50:40.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:40.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:40.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:40 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:41.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:41 vm06 ceph-mon[53878]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:41.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:41 vm06 ceph-mon[53878]: Generating ssh key... 2026-03-09T17:50:41.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:41 vm06 ceph-mon[53878]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:41.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:41 vm06 ceph-mon[53878]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm06", "addr": "192.168.123.106", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:41.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:41 vm06 ceph-mon[53878]: mgrmap e8: vm06.shmhyl(active, since 2s) 2026-03-09T17:50:41.775 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Added host 'vm06' with addr '192.168.123.106' 2026-03-09T17:50:41.775 INFO:teuthology.orchestra.run.vm06.stdout:Deploying mon service with default placement... 2026-03-09T17:50:42.047 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled mon update... 2026-03-09T17:50:42.048 INFO:teuthology.orchestra.run.vm06.stdout:Deploying mgr service with default placement... 2026-03-09T17:50:42.270 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:42 vm06 ceph-mon[53878]: Deploying cephadm binary to vm06 2026-03-09T17:50:42.270 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:42 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:42.270 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:42 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:50:42.270 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:42 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:42.290 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled mgr update... 2026-03-09T17:50:42.290 INFO:teuthology.orchestra.run.vm06.stdout:Deploying crash service with default placement... 2026-03-09T17:50:42.551 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled crash update... 2026-03-09T17:50:42.551 INFO:teuthology.orchestra.run.vm06.stdout:Deploying ceph-exporter service with default placement... 
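The ssh key generated here is the cluster key the orchestrator uses to reach every host as root; bootstrap installs it for root@localhost, adds vm06, and then applies the default service specs, which the journal entries below confirm as mon count:5, mgr count:2 and crash on all hosts. The saved specs can be inspected or adjusted at any time, for instance (a sketch; the mon count shown is a hypothetical override, not what this run uses):

    ceph orch apply mon --placement=3     # override the default mon spec
    ceph orch apply mgr --placement=2
    ceph orch ls mon                      # show the stored mon spec and its placement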
2026-03-09T17:50:42.799 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update... 2026-03-09T17:50:42.799 INFO:teuthology.orchestra.run.vm06.stdout:Deploying prometheus service with default placement... 2026-03-09T17:50:43.067 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled prometheus update... 2026-03-09T17:50:43.067 INFO:teuthology.orchestra.run.vm06.stdout:Deploying grafana service with default placement... 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: Added host vm06 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: Saving service mon spec with placement count:5 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: Saving service mgr spec with placement count:2 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: Saving service crash spec with placement * 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:43.318 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:43 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:43.395 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled grafana update... 2026-03-09T17:50:43.395 INFO:teuthology.orchestra.run.vm06.stdout:Deploying node-exporter service with default placement... 2026-03-09T17:50:43.710 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update... 2026-03-09T17:50:43.710 INFO:teuthology.orchestra.run.vm06.stdout:Deploying alertmanager service with default placement... 2026-03-09T17:50:44.045 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update... 
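Besides mon, mgr and crash, bootstrap schedules the whole monitoring stack by default: ceph-exporter, prometheus, grafana, node-exporter and alertmanager. What was scheduled, and where the daemons eventually land, can be followed with (a sketch):

    ceph orch ls            # one row per service spec, with running/expected counts
    ceph orch ps --refresh  # one row per daemon once they are deployed

Runs that do not need monitoring can instead bootstrap with --skip-monitoring-stack.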
2026-03-09T17:50:44.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:44.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: Saving service ceph-exporter spec with placement * 2026-03-09T17:50:44.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:44.598 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: Saving service prometheus spec with placement count:1 2026-03-09T17:50:44.598 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:44.598 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: Saving service grafana spec with placement count:1 2026-03-09T17:50:44.598 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:44.598 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:44.598 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:44.598 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:44.598 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:44.598 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:44.598 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:44 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3910894468' entity='client.admin' 2026-03-09T17:50:44.669 INFO:teuthology.orchestra.run.vm06.stdout:Enabling the dashboard module... 
2026-03-09T17:50:45.317 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:45 vm06 ceph-mon[53878]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:45.317 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:45 vm06 ceph-mon[53878]: Saving service node-exporter spec with placement * 2026-03-09T17:50:45.317 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:45 vm06 ceph-mon[53878]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:45.317 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:45 vm06 ceph-mon[53878]: Saving service alertmanager spec with placement count:1 2026-03-09T17:50:45.317 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:45 vm06 ceph-mon[53878]: from='mgr.14118 192.168.123.106:0/2688802139' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:45.596 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:45 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2917895639' entity='client.admin' 2026-03-09T17:50:45.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:45 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3779106453' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-09T17:50:46.437 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-03-09T17:50:46.437 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 9, 2026-03-09T17:50:46.437 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-09T17:50:46.437 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "active_name": "vm06.shmhyl", 2026-03-09T17:50:46.437 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-09T17:50:46.437 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-03-09T17:50:46.437 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for the mgr to restart... 2026-03-09T17:50:46.437 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr epoch 9... 2026-03-09T17:50:47.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:47 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3779106453' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-09T17:50:47.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:47 vm06 ceph-mon[53878]: mgrmap e9: vm06.shmhyl(active, since 8s) 2026-03-09T17:50:47.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:47 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/1789462674' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-09T17:50:49.400 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: Active manager daemon vm06.shmhyl restarted 2026-03-09T17:50:49.401 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: Activating manager daemon vm06.shmhyl 2026-03-09T17:50:49.401 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: osdmap e3: 0 total, 0 up, 0 in 2026-03-09T17:50:49.401 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: mgrmap e10: vm06.shmhyl(active, starting, since 0.0097528s) 2026-03-09T17:50:49.401 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-09T17:50:49.401 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mgr metadata", "who": "vm06.shmhyl", "id": "vm06.shmhyl"}]: dispatch 2026-03-09T17:50:49.401 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T17:50:49.401 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T17:50:49.401 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T17:50:49.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: Manager daemon vm06.shmhyl is now available 2026-03-09T17:50:49.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:50:49.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:49 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.shmhyl/mirror_snapshot_schedule"}]: dispatch 2026-03-09T17:50:50.406 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-03-09T17:50:50.406 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11, 2026-03-09T17:50:50.406 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-09T17:50:50.406 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-03-09T17:50:50.406 INFO:teuthology.orchestra.run.vm06.stdout:mgr epoch 9 is available 2026-03-09T17:50:50.406 INFO:teuthology.orchestra.run.vm06.stdout:Generating a dashboard self-signed certificate... 
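With the dashboard module enabled and the mgr back up, bootstrap generates a self-signed certificate, creates the initial "admin" account with a random password, and reads back mgr/dashboard/ssl_server_port to print the URL shown further down. Reproduced manually the steps are roughly (a sketch; the password file path is hypothetical):

    ceph dashboard create-self-signed-cert
    echo 'jtmugev6t3' > /tmp/dashboard-pass.txt
    ceph dashboard ac-user-create admin -i /tmp/dashboard-pass.txt administrator
    ceph config get mgr mgr/dashboard/ssl_server_port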
2026-03-09T17:50:50.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:50 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.shmhyl/trash_purge_schedule"}]: dispatch 2026-03-09T17:50:50.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:50 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:50.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:50 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:50.744 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:50 vm06 ceph-mon[53878]: mgrmap e11: vm06.shmhyl(active, since 1.01786s) 2026-03-09T17:50:50.997 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Self-signed certificate created 2026-03-09T17:50:50.997 INFO:teuthology.orchestra.run.vm06.stdout:Creating initial admin user... 2026-03-09T17:50:51.652 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$a95Vea4D07GrbOp8ha1NDODDG2PBpscg2wNG.8dYGGm6URYkZ5WjC", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773078651, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-09T17:50:51.652 INFO:teuthology.orchestra.run.vm06.stdout:Fetching dashboard port number... 2026-03-09T17:50:51.901 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 8443 2026-03-09T17:50:51.901 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present 2026-03-09T17:50:51.901 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to open ports <[8443]>. firewalld.service is not available 2026-03-09T17:50:51.903 INFO:teuthology.orchestra.run.vm06.stdout:Ceph Dashboard is now available at: 2026-03-09T17:50:51.903 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:51.903 INFO:teuthology.orchestra.run.vm06.stdout: URL: https://vm06.local:8443/ 2026-03-09T17:50:51.903 INFO:teuthology.orchestra.run.vm06.stdout: User: admin 2026-03-09T17:50:51.903 INFO:teuthology.orchestra.run.vm06.stdout: Password: jtmugev6t3 2026-03-09T17:50:51.903 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:51.903 INFO:teuthology.orchestra.run.vm06.stdout:Saving cluster configuration to /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config directory 2026-03-09T17:50:52.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:51 vm06 ceph-mon[53878]: [09/Mar/2026:17:50:50] ENGINE Bus STARTING 2026-03-09T17:50:52.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:51 vm06 ceph-mon[53878]: [09/Mar/2026:17:50:50] ENGINE Serving on http://192.168.123.106:8765 2026-03-09T17:50:52.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:51 vm06 ceph-mon[53878]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:52.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:51 vm06 ceph-mon[53878]: [09/Mar/2026:17:50:50] ENGINE Serving on https://192.168.123.106:7150 2026-03-09T17:50:52.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:51 vm06 ceph-mon[53878]: [09/Mar/2026:17:50:50] ENGINE Bus STARTED 2026-03-09T17:50:52.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:51 vm06 ceph-mon[53878]: [09/Mar/2026:17:50:50] ENGINE Client ('192.168.123.106', 33720) lost — peer dropped the TLS connection suddenly, during 
handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T17:50:52.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:51 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:52.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:51 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:52.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:51 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:52.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:51 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4228836268' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout:Or, if you are only running a single cluster on this host: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: sudo /home/ubuntu/cephtest/cephadm shell 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout:Please consider enabling telemetry to help improve Ceph: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: ceph telemetry on 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout:For more information see: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/ 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:52.208 INFO:teuthology.orchestra.run.vm06.stdout:Bootstrap complete. 2026-03-09T17:50:52.242 INFO:tasks.cephadm:Fetching config... 2026-03-09T17:50:52.242 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:50:52.242 DEBUG:teuthology.orchestra.run.vm06:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-09T17:50:52.260 INFO:tasks.cephadm:Fetching client.admin keyring... 2026-03-09T17:50:52.260 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:50:52.260 DEBUG:teuthology.orchestra.run.vm06:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-09T17:50:52.327 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-09T17:50:52.327 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:50:52.327 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/keyring of=/dev/stdout 2026-03-09T17:50:52.395 INFO:tasks.cephadm:Fetching pub ssh key... 
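The "Bootstrap complete." banner above also prints the canonical way to get a CLI into the new cluster, and every teuthology step below uses exactly that pattern, appending the ceph command after "--". For example, to confirm cluster health from vm06:

    sudo /home/ubuntu/cephtest/cephadm shell \
        --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        -- ceph -s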
2026-03-09T17:50:52.395 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:50:52.395 DEBUG:teuthology.orchestra.run.vm06:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-09T17:50:52.458 INFO:tasks.cephadm:Installing pub ssh key for root users... 2026-03-09T17:50:52.458 DEBUG:teuthology.orchestra.run.vm06:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO2thmtKGKn54oCPVcBLbxTlx7F0bbxjEolM3ZaTwQZe ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-09T17:50:52.539 INFO:teuthology.orchestra.run.vm06.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO2thmtKGKn54oCPVcBLbxTlx7F0bbxjEolM3ZaTwQZe ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:50:52.551 DEBUG:teuthology.orchestra.run.vm09:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO2thmtKGKn54oCPVcBLbxTlx7F0bbxjEolM3ZaTwQZe ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-09T17:50:52.608 INFO:teuthology.orchestra.run.vm09.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO2thmtKGKn54oCPVcBLbxTlx7F0bbxjEolM3ZaTwQZe ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:50:52.635 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-09T17:50:52.827 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:50:53.116 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:52 vm06 ceph-mon[53878]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:53.116 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:52 vm06 ceph-mon[53878]: mgrmap e12: vm06.shmhyl(active, since 2s) 2026-03-09T17:50:53.116 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:52 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3962763073' entity='client.admin' 2026-03-09T17:50:53.152 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-09T17:50:53.152 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-09T17:50:53.335 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:50:53.732 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm09 2026-03-09T17:50:53.732 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T17:50:53.732 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.conf 2026-03-09T17:50:53.754 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T17:50:53.754 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T17:50:53.815 INFO:tasks.cephadm:Adding host vm09 to orchestrator... 
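Before bringing in the second node the task relaxes two defaults: containers may be ptraced (useful for attaching a debugger to a daemon inside its container) and the orchestrator is told to keep a world-readable admin keyring on every host. It then registers vm09, whose address the orchestrator resolves on its own. The underlying commands, runnable from any cephadm shell, are:

    ceph config set mgr mgr/cephadm/allow_ptrace true
    ceph orch client-keyring set client.admin '*' --mode 0755
    ceph orch host add vm09        # an explicit address may be appended if name resolution is unreliable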
2026-03-09T17:50:53.815 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph orch host add vm09 2026-03-09T17:50:54.035 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:50:54.203 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:54 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3784366255' entity='client.admin' 2026-03-09T17:50:54.203 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:54 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:55.596 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:55 vm06 ceph-mon[53878]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:55.596 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:55 vm06 ceph-mon[53878]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm09", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:50:56.550 INFO:teuthology.orchestra.run.vm06.stdout:Added host 'vm09' with addr '192.168.123.109' 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: Deploying cephadm binary to vm09 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: Updating vm06:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: Updating vm06:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.client.admin.keyring 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: mgrmap e13: vm06.shmhyl(active, since 6s) 2026-03-09T17:50:56.632 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-09T17:50:56.632 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-09T17:50:56.633 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:56 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:50:56.817 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph orch host ls --format=json 2026-03-09T17:50:57.107 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:50:57.386 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:50:57.386 INFO:teuthology.orchestra.run.vm06.stdout:[{"addr": "192.168.123.106", "hostname": "vm06", "labels": [], "status": ""}, {"addr": "192.168.123.109", "hostname": "vm09", "labels": [], "status": ""}] 2026-03-09T17:50:57.468 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-09T17:50:57.468 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd crush tunables default 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: Deploying daemon ceph-exporter.vm06 on vm06 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: Added host vm09 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' 
entity='mgr.vm06.shmhyl' 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:50:57.617 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:57 vm06 ceph-mon[53878]: Deploying daemon crash.vm06 on vm06 2026-03-09T17:50:57.783 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:50:59.054 INFO:teuthology.orchestra.run.vm06.stderr:adjusted tunables profile to default 2026-03-09T17:50:59.110 INFO:tasks.cephadm:Adding mon.vm06 on vm06 2026-03-09T17:50:59.110 INFO:tasks.cephadm:Adding mon.vm09 on vm09 2026-03-09T17:50:59.110 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph orch apply mon '2;vm06:192.168.123.106=vm06;vm09:192.168.123.109=vm09' 2026-03-09T17:50:59.306 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:50:59.345 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:50:59.346 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:59 vm06 ceph-mon[53878]: from='client.14189 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T17:50:59.346 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:59 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:59.346 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:59 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:59.346 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:59 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:59.346 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:59 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:50:59.346 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:59 vm06 ceph-mon[53878]: Deploying daemon node-exporter.vm06 on vm06 2026-03-09T17:50:59.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:50:59 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/4277263965' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-09T17:50:59.605 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled mon update... 2026-03-09T17:50:59.673 DEBUG:teuthology.orchestra.run.vm09:mon.vm09> sudo journalctl -f -n 0 -u ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mon.vm09.service 2026-03-09T17:50:59.675 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:50:59.675 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:50:59.903 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:50:59.945 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:00.225 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:00.225 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:00.225 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:00.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:00 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4277263965' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-09T17:51:00.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:00 vm06 ceph-mon[53878]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T17:51:00.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:00 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:00.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:00 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:01.317 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
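The repeated "Waiting for 2 mons in monmap..." entries that follow are the task polling the monmap until the second monitor (vm09) appears. A bash sketch of the same check, assuming jq is available on the host running it (the "dumped monmap epoch" message goes to stderr, so only the JSON reaches jq):

    want=2
    until [ "$(sudo /home/ubuntu/cephtest/cephadm shell -- ceph mon dump -f json 2>/dev/null | jq '.mons | length')" -ge "$want" ]; do
        echo "waiting for $want mons in monmap..."
        sleep 2
    done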
2026-03-09T17:51:01.317 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:01.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:01 vm06 ceph-mon[53878]: from='client.14193 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm06:192.168.123.106=vm06;vm09:192.168.123.109=vm09", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:51:01.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:01 vm06 ceph-mon[53878]: Saving service mon spec with placement vm06:192.168.123.106=vm06;vm09:192.168.123.109=vm09;count:2 2026-03-09T17:51:01.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:01 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/4042838400' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:01.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:01 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:01.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:01 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:01.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:01 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:01.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:01 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:01.494 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:01.534 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:01.815 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:01.815 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:01.815 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:02 vm06 ceph-mon[53878]: Deploying daemon alertmanager.vm06 on vm06 2026-03-09T17:51:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:02 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/3467108991' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:02.873 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
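The placement string passed to "ceph orch apply mon" above, '2;vm06:192.168.123.106=vm06;vm09:192.168.123.109=vm09', pins two monitors to explicit host/IP pairs. The same request can be expressed as a service spec file and applied with "ceph orch apply -i"; a sketch, assuming a host with the admin keyring and a working ceph CLI (the file name is arbitrary):

    cat > mon.yaml <<'EOF'
    service_type: mon
    placement:
      count: 2
      hosts:
        - vm06:192.168.123.106=vm06
        - vm09:192.168.123.109=vm09
    EOF
    ceph orch apply -i mon.yaml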
2026-03-09T17:51:02.873 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:03.037 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:03.071 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:03.398 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:03.398 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:03.398 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:03.725 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:03 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/752343300' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:04.448 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-09T17:51:04.448 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:04.614 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:04.651 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:04.887 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:04.887 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:04.887 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T17:51:05.847 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: Deploying daemon grafana.vm06 on vm06 2026-03-09T17:51:05.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:05 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/1890827030' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:05.949 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:51:05.950 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:06.123 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:06.158 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:06.427 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:06.427 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:06.427 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:06.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:06 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/3565032479' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:07.499 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-09T17:51:07.499 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:07.667 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:07.704 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:07.973 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:07.974 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:07.974 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:08.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:08 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/1643965865' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:09.030 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:51:09.030 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:09.190 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:09.224 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:09.476 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:09.476 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:09.476 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:10.537 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-09T17:51:10.537 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:10.645 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:10 vm06 ceph-mon[53878]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:10.646 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:10 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/2147561059' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:10.711 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:10.743 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:11.001 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:11.001 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:11.001 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:11.911 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:11 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:11.911 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:11 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:11.911 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:11 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:11.911 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:11 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:11.911 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:11 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:11.911 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:11 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:11.911 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:11 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:11.911 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:11 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:11.911 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:11 vm06 ceph-mon[53878]: from='client.? 
192.168.123.109:0/4094540594' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:11.911 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:11 vm06 ceph-mon[53878]: Deploying daemon prometheus.vm06 on vm06 2026-03-09T17:51:12.071 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:51:12.071 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:12.231 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:12.268 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:12.524 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:12.524 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:12.524 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:13.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:12 vm06 ceph-mon[53878]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:13.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:12 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/2889189808' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:13.587 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-09T17:51:13.587 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:13.753 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:13.787 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:14.052 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:14.052 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:14.053 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:15.114 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:51:15.114 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:15.271 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:15.310 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:15.346 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:14 vm06 ceph-mon[53878]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:15.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:14 vm06 ceph-mon[53878]: from='client.? 
192.168.123.109:0/1363754372' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:15.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:14 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:15.563 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:15.563 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:15.564 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:16.122 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:15 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/4028339477' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:16.628 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:51:16.628 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:16.779 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:16.810 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:17.046 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:17.046 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:17.046 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:17.204 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:16 vm06 ceph-mon[53878]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:17.204 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:16 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:17.204 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:16 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' 
entity='mgr.vm06.shmhyl' 2026-03-09T17:51:17.204 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:16 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:17.204 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:16 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-09T17:51:18.035 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:17 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/1011949546' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:18.035 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:17 vm06 ceph-mon[53878]: from='mgr.14162 192.168.123.106:0/4230294033' entity='mgr.vm06.shmhyl' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-09T17:51:18.035 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:17 vm06 ceph-mon[53878]: mgrmap e14: vm06.shmhyl(active, since 27s) 2026-03-09T17:51:18.089 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:51:18.089 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:18.255 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:18.292 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:18.547 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:18.547 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:18.547 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:18.980 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:18 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/728042257' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:19.592 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
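While the mon wait continues, the orchestrator has been scheduling the default monitoring stack on vm06 (ceph-exporter, node-exporter, alertmanager, grafana, prometheus) and has just enabled the mgr prometheus module; the active-mgr restart logged shortly below follows that module change. A quick way to confirm the scheduled services and the module state once placement settles, using the same shell wrapper as above:

    sudo /home/ubuntu/cephtest/cephadm shell -- ceph orch ls
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph orch ps
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph mgr module ls | grep -i prometheus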
2026-03-09T17:51:19.592 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:19.758 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:19.801 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:20.059 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:20.060 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:20.060 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:20.457 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:20 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/659898491' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:21.102 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-09T17:51:21.102 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:21.259 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:21.294 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:21.521 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: Active manager daemon vm06.shmhyl restarted 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: Activating manager daemon vm06.shmhyl 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: osdmap e5: 0 total, 0 up, 0 in 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: mgrmap e15: vm06.shmhyl(active, starting, since 0.00632079s) 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mgr metadata", "who": "vm06.shmhyl", "id": "vm06.shmhyl"}]: dispatch 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: Manager daemon vm06.shmhyl is now available 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.shmhyl/mirror_snapshot_schedule"}]: dispatch 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm06.shmhyl/trash_purge_schedule"}]: dispatch 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:21.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:21.561 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:21.561 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:21.561 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:22.609 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:51:22.609 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:22.799 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: [09/Mar/2026:17:51:21] ENGINE Bus STARTING 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: mgrmap e16: vm06.shmhyl(active, since 1.01204s) 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: [09/Mar/2026:17:51:21] ENGINE Serving on https://192.168.123.106:7150 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: [09/Mar/2026:17:51:21] ENGINE Client ('192.168.123.106', 45932) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: [09/Mar/2026:17:51:21] ENGINE Serving on http://192.168.123.106:8765 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: [09/Mar/2026:17:51:21] ENGINE Bus STARTED 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: from='client.? 
192.168.123.109:0/3914158492' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:22.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:22 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-09T17:51:22.851 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-09T17:51:23.130 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:23.130 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:23.130 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:24.181 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:51:24.182 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:24.357 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:51:24.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:24 vm06 ceph-mon[53878]: mgrmap e17: vm06.shmhyl(active, since 2s) 2026-03-09T17:51:24.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:24 vm06 ceph-mon[53878]: from='client.? 
192.168.123.109:0/551048533' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:24.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:24 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:24.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:24 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:24.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:24 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:24.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:24 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:24.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:24 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T17:51:24.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:24 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:24.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:24 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T17:51:24.654 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:24.654 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:24.654 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: Updating vm09:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: Updating vm06:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 
09 17:51:25 vm06 ceph-mon[53878]: Updating vm09:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.client.admin.keyring 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: Updating vm06:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.client.admin.keyring 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: Deploying daemon ceph-exporter.vm09 on vm09 2026-03-09T17:51:25.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:25 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/1354979092' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:25.820 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-09T17:51:25.820 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:26.076 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:51:26.391 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:26.391 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:26.391 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: Deploying daemon crash.vm09 on vm09 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:26.847 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:26 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/1394480265' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:27.450 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:51:27.450 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:27.615 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:51:27.843 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:27 vm06 ceph-mon[53878]: Deploying daemon node-exporter.vm09 on vm09 2026-03-09T17:51:27.871 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:27.871 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:27.871 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:28.846 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:28 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/2379793802' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:28.921 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
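The cephadm task above is polling until the second monitor shows up in the monmap: it repeatedly runs the cephadm shell command shown in the DEBUG lines and inspects the JSON it returns. A minimal sketch of that wait loop, assuming the same image/fsid as in the log and plain subprocess calls (the helper names are illustrative, not the teuthology implementation):

import json
import subprocess
import time

FSID = "588d7312-1be0-11f1-b5b6-61233c7d7c44"
IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"

def mon_dump():
    # Same invocation as the DEBUG lines above; returns the parsed monmap.
    out = subprocess.check_output([
        "sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE, "shell",
        "-c", "/etc/ceph/ceph.conf", "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--fsid", FSID, "--", "ceph", "mon", "dump", "-f", "json",
    ])
    return json.loads(out)

def wait_for_mons(want=2, interval=1, timeout=300):
    # Keep dumping the monmap until the expected number of mons appears.
    deadline = time.time() + timeout
    while time.time() < deadline:
        monmap = mon_dump()
        if len(monmap["mons"]) >= want:
            return monmap
        time.sleep(interval)
    raise RuntimeError("timed out waiting for %d mons in monmap" % want)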
2026-03-09T17:51:28.921 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:29.146 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:51:29.512 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:29.513 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:50:24.660766Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T17:51:29.513 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm09.lvfebd", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm09.lvfebd", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: Deploying daemon mgr.vm09.lvfebd on vm09 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='client.? 
192.168.123.109:0/654001813' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T17:51:30.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:29 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:30.585 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-09T17:51:30.585 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mon dump -f json 2026-03-09T17:51:30.853 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm09/config 2026-03-09T17:51:35.761 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:35 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: mon.vm06 calling monitor election 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: mon.vm09 calling monitor election 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.? 
192.168.123.109:0/3259239126' entity='mgr.vm09.lvfebd' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm09.lvfebd/crt"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: mon.vm06 is new leader, mons vm06,vm09 in quorum (ranks 0,1) 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: monmap epoch 2 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: last_changed 2026-03-09T17:51:30.695772+0000 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: created 2026-03-09T17:50:24.660766+0000 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: min_mon_release 19 (squid) 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: election_strategy: 1 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: 0: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.vm09 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: fsmap 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: osdmap e5: 0 total, 0 up, 0 in 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: mgrmap e17: vm06.shmhyl(active, since 15s) 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: overall HEALTH_OK 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: Standby manager daemon vm09.lvfebd started 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.? 
192.168.123.109:0/3259239126' entity='mgr.vm09.lvfebd' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.? 192.168.123.109:0/3259239126' entity='mgr.vm09.lvfebd' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm09.lvfebd/key"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.? 192.168.123.109:0/3259239126' entity='mgr.vm09.lvfebd' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:36.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: mgrmap e18: vm06.shmhyl(active, since 15s), standbys: vm09.lvfebd 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mgr metadata", "who": "vm09.lvfebd", "id": "vm09.lvfebd"}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: Updating vm09:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: Updating vm06:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 
192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: Reconfiguring mon.vm06 (unknown last config time)... 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: Reconfiguring daemon mon.vm06 on vm06 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm06.shmhyl", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 
2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-09T17:51:36.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:36 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: Updating vm06:/etc/ceph/ceph.conf 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: mgrmap e18: vm06.shmhyl(active, since 15s), standbys: vm09.lvfebd 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mgr metadata", "who": "vm09.lvfebd", "id": "vm09.lvfebd"}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: Updating vm09:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: Updating vm06:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/config/ceph.conf 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: Reconfiguring mon.vm06 (unknown last config time)... 
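The journal burst above records mon.vm09 joining, the election, and monmap epoch 2 with both mons in quorum (ranks 0,1). Given only the `ceph mon dump -f json` output, one way to confirm that state is to compare the quorum list against the mon ranks; a small sketch using the field names visible in the dumps above:

def in_full_quorum(monmap):
    # monmap is the parsed `ceph mon dump -f json` output; "quorum" lists ranks.
    ranks = {m["rank"] for m in monmap["mons"]}
    return len(ranks) >= 2 and set(monmap.get("quorum", [])) == ranks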
2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: Reconfiguring daemon mon.vm06 on vm06 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm06.shmhyl", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:37.085 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-09T17:51:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:36 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:37.270 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T17:51:37.271 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":2,"fsid":"588d7312-1be0-11f1-b5b6-61233c7d7c44","modified":"2026-03-09T17:51:30.695772Z","created":"2026-03-09T17:50:24.660766Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm09","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:3300","nonce":0},{"type":"v1","addr":"192.168.123.109:6789","nonce":0}]},"addr":"192.168.123.109:6789/0","public_addr":"192.168.123.109:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-09T17:51:37.271 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 2 2026-03-09T17:51:37.321 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-09T17:51:37.321 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph config generate-minimal-conf 2026-03-09T17:51:37.529 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:37.796 INFO:teuthology.orchestra.run.vm06.stdout:# minimal ceph.conf for 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:51:37.796 INFO:teuthology.orchestra.run.vm06.stdout:[global] 2026-03-09T17:51:37.796 INFO:teuthology.orchestra.run.vm06.stdout: fsid = 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:51:37.796 INFO:teuthology.orchestra.run.vm06.stdout: mon_host = [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] 2026-03-09T17:51:37.942 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-03-09T17:51:37.942 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:51:37.942 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.conf 2026-03-09T17:51:38.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: Reconfiguring mgr.vm06.shmhyl (unknown last config time)... 
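With both mons in the monmap, the task regenerates a minimal ceph.conf (the `ceph config generate-minimal-conf` output shown above, containing only fsid and mon_host) and pushes it plus the client.admin keyring to each host with `sudo dd of=...`. A rough sketch of that step, assuming a hypothetical run(host, cmd, stdin=...) remote-exec helper; only the shell commands themselves are taken from the log:

import subprocess

def minimal_conf(fsid, image):
    # Regenerate the minimal [global] section, as in the log above.
    return subprocess.check_output([
        "sudo", "/home/ubuntu/cephtest/cephadm", "--image", image, "shell",
        "-c", "/etc/ceph/ceph.conf", "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--fsid", fsid, "--", "ceph", "config", "generate-minimal-conf",
    ]).decode()

def distribute(hosts, conf_text, keyring_text, run):
    # run() is a stand-in for the remote execution helper; the dd commands
    # match the DEBUG lines ("sudo dd of=/etc/ceph/ceph.conf" etc.).
    for host in hosts:
        run(host, "sudo dd of=/etc/ceph/ceph.conf", stdin=conf_text)
        run(host, "sudo dd of=/etc/ceph/ceph.client.admin.keyring", stdin=keyring_text)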
2026-03-09T17:51:38.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: Reconfiguring daemon mgr.vm06.shmhyl on vm06 2026-03-09T17:51:38.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: Reconfiguring ceph-exporter.vm06 (monmap changed)... 2026-03-09T17:51:38.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: Reconfiguring daemon ceph-exporter.vm06 on vm06 2026-03-09T17:51:38.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: Reconfiguring crash.vm06 (monmap changed)... 2026-03-09T17:51:38.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: Reconfiguring daemon crash.vm06 on vm06 2026-03-09T17:51:38.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:38.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:38.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: Reconfiguring alertmanager.vm06 (dependencies changed)... 2026-03-09T17:51:38.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: Reconfiguring daemon alertmanager.vm06 on vm06 2026-03-09T17:51:38.008 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/2294884796' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:38.008 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:38.008 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:38.008 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:37 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1539218998' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:38.012 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:51:38.012 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T17:51:38.089 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T17:51:38.089 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.conf 2026-03-09T17:51:38.115 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T17:51:38.115 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T17:51:38.179 INFO:tasks.cephadm:Deploying OSDs... 
2026-03-09T17:51:38.179 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:51:38.179 DEBUG:teuthology.orchestra.run.vm06:> dd if=/scratch_devs of=/dev/stdout 2026-03-09T17:51:38.204 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-03-09T17:51:38.204 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/nvme0n1 2026-03-09T17:51:38.263 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/nvme0n1 2026-03-09T17:51:38.263 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:51:38.263 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 727 Links: 1 Device type: 103,1 2026-03-09T17:51:38.263 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:51:38.263 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:51:38.263 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:50:54.995317523 +0000 2026-03-09T17:51:38.263 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:49:34.654466803 +0000 2026-03-09T17:51:38.263 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:49:34.654466803 +0000 2026-03-09T17:51:38.263 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 17:49:33.795858840 +0000 2026-03-09T17:51:38.263 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1 2026-03-09T17:51:38.327 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T17:51:38.327 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T17:51:38.327 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000119334 s, 4.3 MB/s 2026-03-09T17:51:38.328 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/nvme0n1 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: Reconfiguring mgr.vm06.shmhyl (unknown last config time)... 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: Reconfiguring daemon mgr.vm06.shmhyl on vm06 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: Reconfiguring ceph-exporter.vm06 (monmap changed)... 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: Reconfiguring daemon ceph-exporter.vm06 on vm06 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: Reconfiguring crash.vm06 (monmap changed)... 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: Reconfiguring daemon crash.vm06 on vm06 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: Reconfiguring alertmanager.vm06 (dependencies changed)... 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: Reconfiguring daemon alertmanager.vm06 on vm06 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: from='client.? 
192.168.123.109:0/2294884796' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:38.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:37 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1539218998' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:38.387 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/nvme1n1 2026-03-09T17:51:38.448 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/nvme1n1 2026-03-09T17:51:38.448 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:51:38.448 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 740 Links: 1 Device type: 103,2 2026-03-09T17:51:38.448 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:51:38.448 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:51:38.448 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:50:55.031317574 +0000 2026-03-09T17:51:38.448 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:49:35.121066269 +0000 2026-03-09T17:51:38.448 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:49:35.121066269 +0000 2026-03-09T17:51:38.448 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 17:49:33.928858974 +0000 2026-03-09T17:51:38.448 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1 2026-03-09T17:51:38.527 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T17:51:38.527 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T17:51:38.527 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.00150204 s, 341 kB/s 2026-03-09T17:51:38.528 DEBUG:teuthology.orchestra.run.vm06:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme1n1 2026-03-09T17:51:38.593 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/nvme2n1 2026-03-09T17:51:38.655 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/nvme2n1 2026-03-09T17:51:38.655 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:51:38.656 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 751 Links: 1 Device type: 103,5 2026-03-09T17:51:38.656 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:51:38.656 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:51:38.656 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:50:55.072317632 +0000 2026-03-09T17:51:38.656 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:49:35.578001133 +0000 2026-03-09T17:51:38.656 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:49:35.578001133 +0000 2026-03-09T17:51:38.656 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 17:49:34.054859101 +0000 2026-03-09T17:51:38.656 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1 2026-03-09T17:51:38.733 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T17:51:38.734 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T17:51:38.734 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.00012316 s, 4.2 MB/s 2026-03-09T17:51:38.734 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/nvme2n1 2026-03-09T17:51:38.794 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/nvme3n1 2026-03-09T17:51:38.931 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/nvme3n1 2026-03-09T17:51:38.931 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:51:38.931 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 765 Links: 1 Device type: 103,7 2026-03-09T17:51:38.931 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:51:38.931 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:51:38.932 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:50:55.110317686 +0000 2026-03-09T17:51:38.932 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:49:36.057105584 +0000 2026-03-09T17:51:38.932 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:49:36.057105584 +0000 2026-03-09T17:51:38.932 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-09 17:49:34.174859222 +0000 2026-03-09T17:51:38.932 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1 2026-03-09T17:51:38.965 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-09T17:51:38.965 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-09T17:51:38.965 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000531366 s, 964 kB/s 2026-03-09T17:51:38.966 DEBUG:teuthology.orchestra.run.vm06:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme3n1 2026-03-09T17:51:39.038 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T17:51:39.038 DEBUG:teuthology.orchestra.run.vm09:> dd if=/scratch_devs of=/dev/stdout 2026-03-09T17:51:39.053 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-03-09T17:51:39.053 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme0n1 2026-03-09T17:51:39.068 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:38 vm06 ceph-mon[53878]: Reconfiguring grafana.vm06 (dependencies changed)... 2026-03-09T17:51:39.068 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:38 vm06 ceph-mon[53878]: Reconfiguring daemon grafana.vm06 on vm06 2026-03-09T17:51:39.068 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:38 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:39.068 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:38 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:39.111 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme0n1 2026-03-09T17:51:39.111 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:51:39.111 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 729 Links: 1 Device type: 103,1 2026-03-09T17:51:39.111 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:51:39.111 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:51:39.111 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 17:51:23.403637559 +0000 2026-03-09T17:51:39.111 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 17:49:38.247669850 +0000 2026-03-09T17:51:39.111 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 17:49:38.247669850 +0000 2026-03-09T17:51:39.111 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 17:49:37.393507933 +0000 2026-03-09T17:51:39.111 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1 2026-03-09T17:51:39.176 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T17:51:39.176 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T17:51:39.176 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000170377 s, 3.0 MB/s 2026-03-09T17:51:39.177 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme0n1 2026-03-09T17:51:39.234 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme1n1 2026-03-09T17:51:39.292 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme1n1 2026-03-09T17:51:39.292 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:51:39.293 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 739 Links: 1 Device type: 103,3 2026-03-09T17:51:39.293 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:51:39.293 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:51:39.293 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 17:51:23.428637395 +0000 2026-03-09T17:51:39.293 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 17:49:38.685752894 +0000 2026-03-09T17:51:39.293 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 17:49:38.685752894 +0000 2026-03-09T17:51:39.293 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 17:49:37.511530306 +0000 2026-03-09T17:51:39.293 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1 2026-03-09T17:51:39.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:38 vm09 ceph-mon[62586]: Reconfiguring grafana.vm06 (dependencies changed)... 2026-03-09T17:51:39.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:38 vm09 ceph-mon[62586]: Reconfiguring daemon grafana.vm06 on vm06 2026-03-09T17:51:39.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:38 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:39.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:38 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:39.356 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T17:51:39.357 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T17:51:39.357 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000178433 s, 2.9 MB/s 2026-03-09T17:51:39.357 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme1n1 2026-03-09T17:51:39.416 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme2n1 2026-03-09T17:51:39.476 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme2n1 2026-03-09T17:51:39.476 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:51:39.476 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 751 Links: 1 Device type: 103,5 2026-03-09T17:51:39.476 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:51:39.476 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:51:39.476 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 17:51:23.452637238 +0000 2026-03-09T17:51:39.476 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 17:49:39.117834800 +0000 2026-03-09T17:51:39.476 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 17:49:39.117834800 +0000 2026-03-09T17:51:39.476 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 17:49:37.634553627 +0000 2026-03-09T17:51:39.476 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1 2026-03-09T17:51:39.542 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T17:51:39.542 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T17:51:39.542 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000411477 s, 1.2 MB/s 2026-03-09T17:51:39.543 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/nvme2n1 2026-03-09T17:51:39.603 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme3n1 2026-03-09T17:51:39.666 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme3n1 2026-03-09T17:51:39.666 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T17:51:39.666 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 765 Links: 1 Device type: 103,7 2026-03-09T17:51:39.666 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T17:51:39.666 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T17:51:39.666 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 17:51:23.476637081 +0000 2026-03-09T17:51:39.666 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 17:49:39.550853815 +0000 2026-03-09T17:51:39.666 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 17:49:39.550853815 +0000 2026-03-09T17:51:39.666 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 17:49:37.758577137 +0000 2026-03-09T17:51:39.666 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1 2026-03-09T17:51:39.754 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T17:51:39.754 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T17:51:39.754 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000108132 s, 4.7 MB/s 2026-03-09T17:51:39.755 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme3n1 2026-03-09T17:51:39.790 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph orch apply osd --all-available-devices 2026-03-09T17:51:40.065 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm09/config 2026-03-09T17:51:40.261 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:39 vm09 ceph-mon[62586]: Reconfiguring prometheus.vm06 (dependencies changed)... 2026-03-09T17:51:40.262 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:39 vm09 ceph-mon[62586]: Reconfiguring daemon prometheus.vm06 on vm06 2026-03-09T17:51:40.262 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:39 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:40.262 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:39 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:40.262 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:39 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-09T17:51:40.262 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:39 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:40.262 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:39 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:40.262 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:39 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:40.262 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:39 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-09T17:51:40.262 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:39 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:40.318 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled osd.all-available-devices update... 2026-03-09T17:51:40.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:39 vm06 ceph-mon[53878]: Reconfiguring prometheus.vm06 (dependencies changed)... 
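Before scheduling OSDs, the task reads the scratch-device list from /scratch_devs on each node and sanity-checks every device: it must stat cleanly, its first sector must be readable, and it must not appear in `mount` output (ignoring devtmpfs); only then is `ceph orch apply osd --all-available-devices` issued. A sketch of that per-device check under those assumptions (not the teuthology code itself):

import subprocess

def usable_scratch_devices():
    # /scratch_devs lists candidate devices; the log reads it with dd.
    devs = subprocess.check_output(
        ["dd", "if=/scratch_devs", "of=/dev/stdout"]).decode().split()
    usable = []
    for dev in devs:
        # Device node exists and is stat-able.
        subprocess.check_call(["stat", dev])
        # First 512-byte sector is readable.
        subprocess.check_call(["sudo", "dd", "if=" + dev, "of=/dev/null", "count=1"])
        # Mirrors `! mount | grep -v devtmpfs | grep -q <dev>`: the device must
        # not be mounted anywhere; a mounted device is treated as a failure.
        mounts = subprocess.check_output(["mount"]).decode()
        if any(dev in line for line in mounts.splitlines() if "devtmpfs" not in line):
            raise RuntimeError("%s is already mounted" % dev)
        usable.append(dev)
    return usable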
2026-03-09T17:51:40.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:39 vm06 ceph-mon[53878]: Reconfiguring daemon prometheus.vm06 on vm06 2026-03-09T17:51:40.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:39 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:40.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:39 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:40.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:39 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-09T17:51:40.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:39 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:40.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:39 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:40.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:39 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:40.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:39 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-09T17:51:40.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:39 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:40.380 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 2026-03-09T17:51:40.380 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:40.555 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:40.843 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:40.936 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-09T17:51:41.054 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: Reconfiguring ceph-exporter.vm09 (monmap changed)... 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: Reconfiguring daemon ceph-exporter.vm09 on vm09 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: Reconfiguring crash.vm09 (monmap changed)... 
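At this point the cephadm task logs "Waiting for 8 OSDs to come up..." and starts polling `ceph osd stat -f json` through the cephadm shell, watching `num_osds` / `num_up_osds` climb from 0 as each `osd new` request from client.bootstrap-osd is applied. The following is a simplified stand-in for that wait loop, not the task's actual code; the IMAGE and FSID values are copied from the command lines in this log and the timing parameters are arbitrary:

```python
#!/usr/bin/env python3
"""Simplified sketch of the 'Waiting for 8 OSDs to come up...' polling loop."""
import json
import subprocess
import time

IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"
FSID = "588d7312-1be0-11f1-b5b6-61233c7d7c44"


def osd_stat() -> dict:
    # Same invocation as in the log: cephadm shell ... -- ceph osd stat -f json
    out = subprocess.run(
        ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
         "shell", "-c", "/etc/ceph/ceph.conf",
         "-k", "/etc/ceph/ceph.client.admin.keyring",
         "--fsid", FSID, "--", "ceph", "osd", "stat", "-f", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    return json.loads(out)


def wait_for_osds(want: int, interval: float = 1.5, timeout: float = 900.0) -> None:
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        stat = osd_stat()
        print(f"epoch {stat['epoch']}: {stat['num_up_osds']}/{stat['num_osds']} up, want {want}")
        if stat["num_osds"] >= want and stat["num_up_osds"] >= want:
            return
        time.sleep(interval)
    raise TimeoutError(f"{want} OSDs did not come up within {timeout}s")


if __name__ == "__main__":
    wait_for_osds(8)
```

The stdout lines that follow ({"epoch":5,"num_osds":0,...} through {"epoch":11,"num_osds":6,...}) are exactly the JSON documents such a loop would be parsing while the six loop devices on each host are turned into OSDs.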
2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: Reconfiguring daemon crash.vm09 on vm09 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: Reconfiguring mgr.vm09.lvfebd (monmap changed)... 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm09.lvfebd", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: Reconfiguring daemon mgr.vm09.lvfebd on vm09 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 
192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch 2026-03-09T17:51:41.055 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T17:51:41.310 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-09T17:51:41.310 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.310 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T17:51:41.310 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-09T17:51:41.310 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.310 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:51:41.310 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:41 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2499159572' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: Reconfiguring ceph-exporter.vm09 (monmap changed)... 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: Reconfiguring daemon ceph-exporter.vm09 on vm09 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: Reconfiguring crash.vm09 (monmap changed)... 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: Reconfiguring daemon crash.vm09 on vm09 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: Reconfiguring mgr.vm09.lvfebd (monmap changed)... 
2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm09.lvfebd", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: Reconfiguring daemon mgr.vm09.lvfebd on vm09 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: 
from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T17:51:41.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-09T17:51:41.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:41.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:51:41.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:41 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2499159572' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:41.937 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:42.171 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:42.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='client.14256 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:51:42.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: Marking host: vm06 for OSDSpec preview refresh. 2026-03-09T17:51:42.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: Marking host: vm09 for OSDSpec preview refresh. 2026-03-09T17:51:42.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: Saving service osd.all-available-devices spec with placement * 2026-03-09T17:51:42.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: Reconfiguring mon.vm09 (monmap changed)... 2026-03-09T17:51:42.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: Reconfiguring daemon mon.vm09 on vm09 2026-03-09T17:51:42.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:42.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T17:51:42.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch 2026-03-09T17:51:42.226 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:51:42.227 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:51:42.227 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:42 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='client.14256 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: Marking host: vm06 for OSDSpec preview refresh. 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: Marking host: vm09 for OSDSpec preview refresh. 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: Saving service osd.all-available-devices spec with placement * 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: Reconfiguring mon.vm09 (monmap changed)... 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: Reconfiguring daemon mon.vm09 on vm09 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-03-09T17:51:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:51:42.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:42 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:51:42.431 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:42.484 
INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-09T17:51:43.320 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:43 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2688620094' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:43.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:43 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2688620094' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:43.485 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:43.686 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:43.936 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:44.000 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1773078703,"num_remapped_pgs":0} 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='client.? 192.168.123.109:0/1834601362' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4560bd77-dc28-428e-808a-b08a3b7c4f61"}]: dispatch 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4560bd77-dc28-428e-808a-b08a3b7c4f61"}]: dispatch 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4560bd77-dc28-428e-808a-b08a3b7c4f61"}]': finished 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: osdmap e6: 1 total, 0 up, 1 in 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/683054983' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5eb6c31c-2e94-4e9e-b8a3-90ec623140df"}]: dispatch 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/683054983' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5eb6c31c-2e94-4e9e-b8a3-90ec623140df"}]': finished 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: osdmap e7: 2 total, 0 up, 2 in 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='client.? 192.168.123.109:0/74703019' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4061320135' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:44.066 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:44 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/555515838' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/1834601362' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4560bd77-dc28-428e-808a-b08a3b7c4f61"}]: dispatch 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4560bd77-dc28-428e-808a-b08a3b7c4f61"}]: dispatch 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4560bd77-dc28-428e-808a-b08a3b7c4f61"}]': finished 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: osdmap e6: 1 total, 0 up, 1 in 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/683054983' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5eb6c31c-2e94-4e9e-b8a3-90ec623140df"}]: dispatch 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/683054983' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5eb6c31c-2e94-4e9e-b8a3-90ec623140df"}]': finished 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: osdmap e7: 2 total, 0 up, 2 in 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/74703019' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4061320135' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:44.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:44 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/555515838' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:45.001 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:45.171 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:45.397 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:45.457 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1773078703,"num_remapped_pgs":0} 2026-03-09T17:51:46.458 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:46.559 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:46 vm09 ceph-mon[62586]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:46.559 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:46 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3317168936' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:46.634 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:46.661 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:46 vm06 ceph-mon[53878]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:46.661 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:46 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/3317168936' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:46.868 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:46.954 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":8,"num_osds":3,"num_up_osds":0,"osd_up_since":0,"num_in_osds":3,"osd_in_since":1773078706,"num_remapped_pgs":0} 2026-03-09T17:51:47.586 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:47 vm09 ceph-mon[62586]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:47.586 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:47 vm09 ceph-mon[62586]: from='client.? 192.168.123.109:0/1793429749' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d74ddbaf-1751-4a6a-90ef-58defa6b7fec"}]: dispatch 2026-03-09T17:51:47.586 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:47 vm09 ceph-mon[62586]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d74ddbaf-1751-4a6a-90ef-58defa6b7fec"}]: dispatch 2026-03-09T17:51:47.586 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:47 vm09 ceph-mon[62586]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d74ddbaf-1751-4a6a-90ef-58defa6b7fec"}]': finished 2026-03-09T17:51:47.586 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:47 vm09 ceph-mon[62586]: osdmap e8: 3 total, 0 up, 3 in 2026-03-09T17:51:47.586 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:47 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:47.586 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:47 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:47.586 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:47 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:47.586 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:47 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/225327573' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:47.586 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:47 vm09 ceph-mon[62586]: from='client.? 192.168.123.109:0/3035094478' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:47.710 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:47 vm06 ceph-mon[53878]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:47.710 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:47 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/1793429749' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d74ddbaf-1751-4a6a-90ef-58defa6b7fec"}]: dispatch 2026-03-09T17:51:47.710 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:47 vm06 ceph-mon[53878]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d74ddbaf-1751-4a6a-90ef-58defa6b7fec"}]: dispatch 2026-03-09T17:51:47.710 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:47 vm06 ceph-mon[53878]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d74ddbaf-1751-4a6a-90ef-58defa6b7fec"}]': finished 2026-03-09T17:51:47.710 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:47 vm06 ceph-mon[53878]: osdmap e8: 3 total, 0 up, 3 in 2026-03-09T17:51:47.710 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:47 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:47.710 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:47 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:47.710 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:47 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:47.710 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:47 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/225327573' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:47.710 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:47 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/3035094478' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:47.956 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:48.169 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:48.405 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:48.488 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1773078707,"num_remapped_pgs":0} 2026-03-09T17:51:48.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:48 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1160315018' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5144a6b7-3345-4634-9f5f-5f16e2d43b37"}]: dispatch 2026-03-09T17:51:48.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:48 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/1160315018' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5144a6b7-3345-4634-9f5f-5f16e2d43b37"}]': finished 2026-03-09T17:51:48.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:48 vm06 ceph-mon[53878]: osdmap e9: 4 total, 0 up, 4 in 2026-03-09T17:51:48.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:48 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:48.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:48 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:48.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:48 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:48.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:48 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:48.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:48 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1160315018' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5144a6b7-3345-4634-9f5f-5f16e2d43b37"}]: dispatch 2026-03-09T17:51:48.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:48 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1160315018' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5144a6b7-3345-4634-9f5f-5f16e2d43b37"}]': finished 2026-03-09T17:51:48.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:48 vm09 ceph-mon[62586]: osdmap e9: 4 total, 0 up, 4 in 2026-03-09T17:51:48.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:48 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:48.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:48 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:48.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:48 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:48.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:48 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:49.489 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:49.672 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:49.696 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:49 vm06 ceph-mon[53878]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:49.696 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:49 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2799132455' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:49.696 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:49 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/528884358' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:49.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:49 vm09 ceph-mon[62586]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:49.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:49 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2799132455' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:49.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:49 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/528884358' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:49.891 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:49.959 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1773078707,"num_remapped_pgs":0} 2026-03-09T17:51:50.482 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:50 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3307246397' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:50.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:50 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3307246397' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:50.959 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:51.124 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:51.356 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:51.413 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":10,"num_osds":5,"num_up_osds":0,"osd_up_since":0,"num_in_osds":5,"osd_in_since":1773078710,"num_remapped_pgs":0} 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/3979493596' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "683b3f8e-548a-4e5a-a4b5-3310ecea811d"}]: dispatch 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "683b3f8e-548a-4e5a-a4b5-3310ecea811d"}]: dispatch 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "683b3f8e-548a-4e5a-a4b5-3310ecea811d"}]': finished 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: osdmap e10: 5 total, 0 up, 5 in 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/3450186841' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:51.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:51 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/129377138' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='client.? 192.168.123.109:0/3979493596' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "683b3f8e-548a-4e5a-a4b5-3310ecea811d"}]: dispatch 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "683b3f8e-548a-4e5a-a4b5-3310ecea811d"}]: dispatch 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "683b3f8e-548a-4e5a-a4b5-3310ecea811d"}]': finished 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: osdmap e10: 5 total, 0 up, 5 in 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='client.? 192.168.123.109:0/3450186841' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:51.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:51 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/129377138' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:52.414 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:52.436 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:52 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1422392730' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "58c6abe1-279b-44cd-8e9b-95986ca9d27a"}]: dispatch 2026-03-09T17:51:52.436 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:52 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/1422392730' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "58c6abe1-279b-44cd-8e9b-95986ca9d27a"}]': finished 2026-03-09T17:51:52.436 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:52 vm06 ceph-mon[53878]: osdmap e11: 6 total, 0 up, 6 in 2026-03-09T17:51:52.436 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:52 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:52.437 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:52 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:52.437 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:52 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:52.437 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:52 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:52.437 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:52 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:52.437 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:52 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:52.437 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:52 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/684321071' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:52.579 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:52.818 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:52 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1422392730' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "58c6abe1-279b-44cd-8e9b-95986ca9d27a"}]: dispatch 2026-03-09T17:51:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:52 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/1422392730' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "58c6abe1-279b-44cd-8e9b-95986ca9d27a"}]': finished 2026-03-09T17:51:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:52 vm09 ceph-mon[62586]: osdmap e11: 6 total, 0 up, 6 in 2026-03-09T17:51:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:52 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:52 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:52 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:52 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:52 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:52 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:52 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/684321071' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:52.881 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1773078711,"num_remapped_pgs":0} 2026-03-09T17:51:53.827 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:53 vm09 ceph-mon[62586]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:53.828 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:53 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/778479462' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:53.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:53 vm06 ceph-mon[53878]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:53.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:53 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/778479462' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:53.882 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:54.062 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:54.287 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:54.339 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1773078711,"num_remapped_pgs":0} 2026-03-09T17:51:54.444 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1785397776' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:54.444 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/2978916859' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "abc221fa-4eae-417c-a678-cc476a5ad3c2"}]: dispatch 2026-03-09T17:51:54.780 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1785397776' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:54.780 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='client.? 192.168.123.109:0/2978916859' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "abc221fa-4eae-417c-a678-cc476a5ad3c2"}]: dispatch 2026-03-09T17:51:54.780 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "abc221fa-4eae-417c-a678-cc476a5ad3c2"}]: dispatch 2026-03-09T17:51:54.780 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "abc221fa-4eae-417c-a678-cc476a5ad3c2"}]': finished 2026-03-09T17:51:54.780 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: osdmap e12: 7 total, 0 up, 7 in 2026-03-09T17:51:54.781 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:54.781 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:54.781 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:54.781 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:54.781 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:54.781 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:54.781 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:54 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:54.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "abc221fa-4eae-417c-a678-cc476a5ad3c2"}]: dispatch 2026-03-09T17:51:54.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "abc221fa-4eae-417c-a678-cc476a5ad3c2"}]': finished 2026-03-09T17:51:54.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: osdmap e12: 7 total, 0 up, 7 in 2026-03-09T17:51:54.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:54.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:54.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:54.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:54.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:54.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:54.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:54 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:55.340 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/1558464118' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3934855325' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9986d133-10d5-4cca-be5e-8bbb6d7e6fac"}]: dispatch 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/3934855325' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9986d133-10d5-4cca-be5e-8bbb6d7e6fac"}]': finished 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: osdmap e13: 8 total, 0 up, 8 in 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:55.446 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:55 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T17:51:55.533 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:55.767 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:55.832 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078715,"num_remapped_pgs":0} 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='client.? 192.168.123.109:0/1558464118' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3934855325' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9986d133-10d5-4cca-be5e-8bbb6d7e6fac"}]: dispatch 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/3934855325' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9986d133-10d5-4cca-be5e-8bbb6d7e6fac"}]': finished 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: osdmap e13: 8 total, 0 up, 8 in 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:55 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T17:51:56.833 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:56.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:56 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3452995403' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:56.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:56 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2705718070' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:56.846 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:56 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3452995403' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T17:51:56.846 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:56 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2705718070' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:57.006 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:57.243 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:57.289 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078715,"num_remapped_pgs":0} 2026-03-09T17:51:57.452 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:57 vm06 ceph-mon[53878]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:57.452 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:57 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4046837123' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:57.542 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:57 vm09 ceph-mon[62586]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:57.542 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:57 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4046837123' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:58.289 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:51:58.487 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/2414919236' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f242d1d0-8ade-4644-adb3-addcc05baccf"}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f242d1d0-8ade-4644-adb3-addcc05baccf"}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f242d1d0-8ade-4644-adb3-addcc05baccf"}]': finished 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: osdmap e14: 9 total, 0 up, 9 in 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 8}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='client.? 192.168.123.109:0/2798956116' entity='client.bootstrap-osd' cmd=[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='client.? 
192.168.123.109:0/2798956116' entity='client.bootstrap-osd' cmd='[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]': finished 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: osdmap e15: 8 total, 0 up, 8 in 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:58.510 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:58 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T17:51:58.765 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='client.? 192.168.123.109:0/2414919236' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f242d1d0-8ade-4644-adb3-addcc05baccf"}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f242d1d0-8ade-4644-adb3-addcc05baccf"}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f242d1d0-8ade-4644-adb3-addcc05baccf"}]': finished 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: osdmap e14: 9 total, 0 up, 9 in 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 8}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='client.? 192.168.123.109:0/2798956116' entity='client.bootstrap-osd' cmd=[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]: dispatch 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='client.? 
192.168.123.109:0/2798956116' entity='client.bootstrap-osd' cmd='[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]': finished 2026-03-09T17:51:58.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: osdmap e15: 8 total, 0 up, 8 in 2026-03-09T17:51:58.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:58.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:58.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:58.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:58.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:58.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:58.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:58.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:58 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T17:51:58.848 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2997040237' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a171c3b3-ff38-47bd-a309-1b530351cf86"}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a171c3b3-ff38-47bd-a309-1b530351cf86"}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a171c3b3-ff38-47bd-a309-1b530351cf86"}]': finished 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: osdmap e16: 9 total, 0 up, 9 in 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 8}]: dispatch 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_id: all-available-devices 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_name: osd.all-available-devices 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: placement: 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: host_pattern: '*' 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: spec: 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: data_devices: 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: all: true 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: filter_logic: AND 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: objectstore: bluestore 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm09/config 2026-03-09T17:51:59.836 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpz_y57gwz:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpfrqpns35:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> passed data devices: 8 physical, 0 LVM 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4560bd77-dc28-428e-808a-b08a3b7c4f61 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-253171c5-78ba-4ea4-8025-ff915df37c11 /dev/nvme0n1 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme0n1" successfully created. 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Creating devices file /etc/lvm/devices/system.devices 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-253171c5-78ba-4ea4-8025-ff915df37c11" successfully created 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 ceph-253171c5-78ba-4ea4-8025-ff915df37c11 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61" created. 
2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.0 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/ 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 4560bd77-dc28-428e-808a-b08a3b7c4f61 --setuser ceph --setgroup ceph 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:43.855+0000 7f9372833740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) No valid bdev label found 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:44.123+0000 7f9372833740 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme0n1 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.836 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 0 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme0n1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new d74ddbaf-1751-4a6a-90ef-58defa6b7fec 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f /dev/nvme1n1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme1n1" successfully created. 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f" successfully created 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec" created. 
2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.2 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/ 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid d74ddbaf-1751-4a6a-90ef-58defa6b7fec --setuser ceph --setgroup ceph 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:47.245+0000 7f5125bc1740 -1 bluestore(/var/lib/ceph/osd/ceph-2//block) No valid bdev label found 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:47.507+0000 7f5125bc1740 -1 bluestore(/var/lib/ceph/osd/ceph-2/) _read_fsid unparsable uuid 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme1n1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.837 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 2 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme1n1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 683b3f8e-548a-4e5a-a4b5-3310ecea811d 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-f7a153dc-290e-4681-9b92-67b80457d0e1 /dev/nvme2n1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme2n1" successfully created. 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-f7a153dc-290e-4681-9b92-67b80457d0e1" successfully created 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d ceph-f7a153dc-290e-4681-9b92-67b80457d0e1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d" created. 
2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.4 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/ 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid 683b3f8e-548a-4e5a-a4b5-3310ecea811d --setuser ceph --setgroup ceph 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:50.929+0000 7fc514bdb740 -1 bluestore(/var/lib/ceph/osd/ceph-4//block) No valid bdev label found 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:51.191+0000 7fc514bdb740 -1 bluestore(/var/lib/ceph/osd/ceph-4/) _read_fsid unparsable uuid 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme2n1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.837 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 4 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme2n1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new abc221fa-4eae-417c-a678-cc476a5ad3c2 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-cf5ec41b-bf72-479a-b782-4be440978a90 /dev/nvme3n1 2026-03-09T17:51:59.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme3n1" successfully created. 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-cf5ec41b-bf72-479a-b782-4be440978a90" successfully created 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 ceph-cf5ec41b-bf72-479a-b782-4be440978a90 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2" created. 
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-6/activate.monmap 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.6 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/keyring 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/ 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 6 --monmap /var/lib/ceph/osd/ceph-6/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-6/ --osd-uuid abc221fa-4eae-417c-a678-cc476a5ad3c2 --setuser ceph --setgroup ceph 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:54.826+0000 7ff029d05740 -1 bluestore(/var/lib/ceph/osd/ceph-6//block) No valid bdev label found 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:55.094+0000 7ff029d05740 -1 bluestore(/var/lib/ceph/osd/ceph-6/) _read_fsid unparsable uuid 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme3n1 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.838 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 6
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme3n1
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new f242d1d0-8ade-4644-adb3-addcc05baccf
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-26af5c46-83ed-4da6-a11e-2f98d1bb9ec6 /dev/vdb
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: Failed to read lvm info for /dev/vdb PVID Iy6rn9q4nJ3CgbkBWRFvhGo8I9zVqvKL.
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Was unable to complete a new OSD, will rollback changes
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.8 --yes-i-really-mean-it
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: purged osd.8
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> No OSD identified by "8" was found among LVM-based OSDs.
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Proceeding to check RAW-based OSDs.
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr No OSD were found. 
2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module> 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpz_y57gwz:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpfrqpns35:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: if self._apply_service(spec): 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in 
_apply_service 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return future.result(timeout) 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.__get_result() 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise self._exception 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-09T17:51:59.838 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return await gather(*futures) 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret_msg = await self.create_single_host( 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise RuntimeError( 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm09/config 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpz_y57gwz:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpfrqpns35:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 
/dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> passed data devices: 8 physical, 0 LVM 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4560bd77-dc28-428e-808a-b08a3b7c4f61 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-253171c5-78ba-4ea4-8025-ff915df37c11 /dev/nvme0n1 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme0n1" successfully created. 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Creating devices file /etc/lvm/devices/system.devices 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-253171c5-78ba-4ea4-8025-ff915df37c11" successfully created 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 ceph-253171c5-78ba-4ea4-8025-ff915df37c11 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61" created. 
2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.0 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/ 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 4560bd77-dc28-428e-808a-b08a3b7c4f61 --setuser ceph --setgroup ceph 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:43.855+0000 7f9372833740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) No valid bdev label found 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:44.123+0000 7f9372833740 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme0n1 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.839 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 0 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme0n1 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new d74ddbaf-1751-4a6a-90ef-58defa6b7fec 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f /dev/nvme1n1 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme1n1" successfully created. 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f" successfully created 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec" created. 
2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.2 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/ 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid d74ddbaf-1751-4a6a-90ef-58defa6b7fec --setuser ceph --setgroup ceph 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:47.245+0000 7f5125bc1740 -1 bluestore(/var/lib/ceph/osd/ceph-2//block) No valid bdev label found 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:47.507+0000 7f5125bc1740 -1 bluestore(/var/lib/ceph/osd/ceph-2/) _read_fsid unparsable uuid 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme1n1 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.839 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 2 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme1n1 2026-03-09T17:51:59.839 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 683b3f8e-548a-4e5a-a4b5-3310ecea811d 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-f7a153dc-290e-4681-9b92-67b80457d0e1 /dev/nvme2n1 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme2n1" successfully created. 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-f7a153dc-290e-4681-9b92-67b80457d0e1" successfully created 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d ceph-f7a153dc-290e-4681-9b92-67b80457d0e1 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d" created. 
2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.4 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/ 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid 683b3f8e-548a-4e5a-a4b5-3310ecea811d --setuser ceph --setgroup ceph 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:50.929+0000 7fc514bdb740 -1 bluestore(/var/lib/ceph/osd/ceph-4//block) No valid bdev label found 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:51.191+0000 7fc514bdb740 -1 bluestore(/var/lib/ceph/osd/ceph-4/) _read_fsid unparsable uuid 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme2n1 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.840 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 4 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme2n1 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new abc221fa-4eae-417c-a678-cc476a5ad3c2 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-cf5ec41b-bf72-479a-b782-4be440978a90 /dev/nvme3n1 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme3n1" successfully created. 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-cf5ec41b-bf72-479a-b782-4be440978a90" successfully created 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 ceph-cf5ec41b-bf72-479a-b782-4be440978a90 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2" created. 
2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-6/activate.monmap 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.6 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/keyring 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/ 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 6 --monmap /var/lib/ceph/osd/ceph-6/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-6/ --osd-uuid abc221fa-4eae-417c-a678-cc476a5ad3c2 --setuser ceph --setgroup ceph 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:54.826+0000 7ff029d05740 -1 bluestore(/var/lib/ceph/osd/ceph-6//block) No valid bdev label found 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:55.094+0000 7ff029d05740 -1 bluestore(/var/lib/ceph/osd/ceph-6/) _read_fsid unparsable uuid 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme3n1 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.840 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 6 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme3n1 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new f242d1d0-8ade-4644-adb3-addcc05baccf 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-26af5c46-83ed-4da6-a11e-2f98d1bb9ec6 /dev/vdb 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: Failed to read lvm info for /dev/vdb PVID Iy6rn9q4nJ3CgbkBWRFvhGo8I9zVqvKL. 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Was unable to complete a new OSD, will rollback changes 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.8 --yes-i-really-mean-it 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: purged osd.8 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> No OSD identified by "8" was found among LVM-based OSDs. 2026-03-09T17:51:59.840 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Proceeding to check RAW-based OSDs. 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr No OSD were found. 
2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module> 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpz_y57gwz:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpfrqpns35:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/2026691895' entity='client.bootstrap-osd' cmd=[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]: dispatch 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2026691895' entity='client.bootstrap-osd' cmd='[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]': finished 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: osdmap e17: 8 total, 0 up, 8 in 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3809009406' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:59.841 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:51:59 vm09 ceph-mon[62586]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL) 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2997040237' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a171c3b3-ff38-47bd-a309-1b530351cf86"}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a171c3b3-ff38-47bd-a309-1b530351cf86"}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a171c3b3-ff38-47bd-a309-1b530351cf86"}]': finished 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: osdmap e16: 9 total, 0 up, 9 in 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 8}]: dispatch 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: service_id: all-available-devices 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: service_name: osd.all-available-devices 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: placement: 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: host_pattern: '*' 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: spec: 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: data_devices: 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: all: true 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: filter_logic: AND 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: objectstore: bluestore 2026-03-09T17:51:59.848 
INFO:journalctl@ceph.mon.vm06.vm06.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm09/config 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpz_y57gwz:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpfrqpns35:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> passed data devices: 8 physical, 0 LVM 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4560bd77-dc28-428e-808a-b08a3b7c4f61 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-253171c5-78ba-4ea4-8025-ff915df37c11 /dev/nvme0n1 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme0n1" successfully created. 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Creating devices file /etc/lvm/devices/system.devices 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-253171c5-78ba-4ea4-8025-ff915df37c11" successfully created 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 ceph-253171c5-78ba-4ea4-8025-ff915df37c11 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61" created. 
2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.0 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/ 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 4560bd77-dc28-428e-808a-b08a3b7c4f61 --setuser ceph --setgroup ceph 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:43.855+0000 7f9372833740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) No valid bdev label found 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:44.123+0000 7f9372833740 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme0n1 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.848 
INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 0 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme0n1 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new d74ddbaf-1751-4a6a-90ef-58defa6b7fec 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f /dev/nvme1n1 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme1n1" successfully created. 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f" successfully created 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec" created. 
2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/ 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid d74ddbaf-1751-4a6a-90ef-58defa6b7fec --setuser ceph --setgroup ceph 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:47.245+0000 7f5125bc1740 -1 bluestore(/var/lib/ceph/osd/ceph-2//block) No valid bdev label found 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:47.507+0000 7f5125bc1740 -1 bluestore(/var/lib/ceph/osd/ceph-2/) _read_fsid unparsable uuid 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme1n1 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.849 
INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme1n1 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 683b3f8e-548a-4e5a-a4b5-3310ecea811d 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-f7a153dc-290e-4681-9b92-67b80457d0e1 /dev/nvme2n1 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme2n1" successfully created. 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-f7a153dc-290e-4681-9b92-67b80457d0e1" successfully created 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d ceph-f7a153dc-290e-4681-9b92-67b80457d0e1 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d" created. 
2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.4 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/ 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid 683b3f8e-548a-4e5a-a4b5-3310ecea811d --setuser ceph --setgroup ceph 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:50.929+0000 7fc514bdb740 -1 bluestore(/var/lib/ceph/osd/ceph-4//block) No valid bdev label found 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:51.191+0000 7fc514bdb740 -1 bluestore(/var/lib/ceph/osd/ceph-4/) _read_fsid unparsable uuid 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme2n1 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.849 
INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 4 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme2n1 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new abc221fa-4eae-417c-a678-cc476a5ad3c2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-cf5ec41b-bf72-479a-b782-4be440978a90 /dev/nvme3n1 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme3n1" successfully created. 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-cf5ec41b-bf72-479a-b782-4be440978a90" successfully created 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 ceph-cf5ec41b-bf72-479a-b782-4be440978a90 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2" created. 
2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T17:51:59.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-6/activate.monmap 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.6 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/keyring 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/ 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 6 --monmap /var/lib/ceph/osd/ceph-6/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-6/ --osd-uuid abc221fa-4eae-417c-a678-cc476a5ad3c2 --setuser ceph --setgroup ceph 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:54.826+0000 7ff029d05740 -1 bluestore(/var/lib/ceph/osd/ceph-6//block) No valid bdev label found 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:55.094+0000 7ff029d05740 -1 bluestore(/var/lib/ceph/osd/ceph-6/) _read_fsid unparsable uuid 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme3n1 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.850 
INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 6 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme3n1 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new f242d1d0-8ade-4644-adb3-addcc05baccf 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-26af5c46-83ed-4da6-a11e-2f98d1bb9ec6 /dev/vdb 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: Failed to read lvm info for /dev/vdb PVID Iy6rn9q4nJ3CgbkBWRFvhGo8I9zVqvKL. 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Was unable to complete a new OSD, will rollback changes 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.8 --yes-i-really-mean-it 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: purged osd.8 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> No OSD identified by "8" was found among LVM-based OSDs. 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Proceeding to check RAW-based OSDs. 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr No OSD were found. 
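[annotation] The batch run gets through the four nvme devices and then stops on the first virtio disk: vgcreate on /dev/vdb fails with "Failed to read lvm info for /dev/vdb PVID ...", so ceph-volume rolls back the OSD id it had just reserved (ceph osd purge-new osd.8 --yes-i-really-mean-it) and the whole lvm batch invocation exits non-zero, which is what the cephadm and mgr tracebacks below report. One plausible reading, given the "Creating devices file /etc/lvm/devices/system.devices" line in the replayed stderr further down, is that /dev/vdb carries a stale PV signature that is not listed in the freshly created LVM devices file; that is an inference, not something the log states. A quick, hypothetical diagnostic (not part of this test run) would be to ask LVM directly what it sees on the device:

#!/usr/bin/env python3
# lvm_view.py (hypothetical diagnostic sketch)
"""Ask LVM what it knows about a block device before handing it to
`ceph-volume lvm batch`. An empty or error answer for a device that is
expected to be clean points at stale metadata or a devices-file mismatch."""
import subprocess
import sys

def lvm_view(dev: str) -> None:
    # `pvs` ships with stock LVM2; --noheadings keeps the output script-friendly.
    res = subprocess.run(
        ["pvs", "--noheadings", "-o", "pv_name,vg_name,pv_uuid", dev],
        capture_output=True, text=True,
    )
    print(f"# pvs {dev} (rc={res.returncode})")
    print(res.stdout or res.stderr)

if __name__ == "__main__":
    for dev in sys.argv[1:] or ["/dev/vdb"]:
        lvm_view(dev)

Running it as python3 lvm_view.py /dev/vdb /dev/vdc on the affected host would show whether LVM reports the device at all or rejects it the same way the vgcreate above did.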
2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Traceback (most recent call last): 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: exec(code, run_globals) 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpz_y57gwz:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpfrqpns35:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Traceback (most recent call last): 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: if self._apply_service(spec): 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in 
_apply_service 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return future.result(timeout) 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return self.__get_result() 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: raise self._exception 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-09T17:51:59.850 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return await gather(*futures) 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ret_msg = await self.create_single_host( 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: raise RuntimeError( 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm09/config 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpz_y57gwz:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpfrqpns35:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 
/dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> passed data devices: 8 physical, 0 LVM 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4560bd77-dc28-428e-808a-b08a3b7c4f61 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-253171c5-78ba-4ea4-8025-ff915df37c11 /dev/nvme0n1 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme0n1" successfully created. 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Creating devices file /etc/lvm/devices/system.devices 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-253171c5-78ba-4ea4-8025-ff915df37c11" successfully created 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 ceph-253171c5-78ba-4ea4-8025-ff915df37c11 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61" created. 
2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.0 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/ 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 4560bd77-dc28-428e-808a-b08a3b7c4f61 --setuser ceph --setgroup ceph 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:43.855+0000 7f9372833740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) No valid bdev label found 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:44.123+0000 7f9372833740 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme0n1 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-253171c5-78ba-4ea4-8025-ff915df37c11/osd-block-4560bd77-dc28-428e-808a-b08a3b7c4f61 /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-09T17:51:59.851 
INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 0 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme0n1 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new d74ddbaf-1751-4a6a-90ef-58defa6b7fec 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f /dev/nvme1n1 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme1n1" successfully created. 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f" successfully created 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec" created. 
2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.2 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/ 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid d74ddbaf-1751-4a6a-90ef-58defa6b7fec --setuser ceph --setgroup ceph 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:47.245+0000 7f5125bc1740 -1 bluestore(/var/lib/ceph/osd/ceph-2//block) No valid bdev label found 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:47.507+0000 7f5125bc1740 -1 bluestore(/var/lib/ceph/osd/ceph-2/) _read_fsid unparsable uuid 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme1n1 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-533ca8ed-dc97-4636-98b7-4e0eca609f1f/osd-block-d74ddbaf-1751-4a6a-90ef-58defa6b7fec /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-09T17:51:59.851 
INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 2 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme1n1 2026-03-09T17:51:59.851 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 683b3f8e-548a-4e5a-a4b5-3310ecea811d 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-f7a153dc-290e-4681-9b92-67b80457d0e1 /dev/nvme2n1 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme2n1" successfully created. 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-f7a153dc-290e-4681-9b92-67b80457d0e1" successfully created 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d ceph-f7a153dc-290e-4681-9b92-67b80457d0e1 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d" created. 
2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.4 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/ 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid 683b3f8e-548a-4e5a-a4b5-3310ecea811d --setuser ceph --setgroup ceph 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:50.929+0000 7fc514bdb740 -1 bluestore(/var/lib/ceph/osd/ceph-4//block) No valid bdev label found 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:51.191+0000 7fc514bdb740 -1 bluestore(/var/lib/ceph/osd/ceph-4/) _read_fsid unparsable uuid 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme2n1 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-f7a153dc-290e-4681-9b92-67b80457d0e1/osd-block-683b3f8e-548a-4e5a-a4b5-3310ecea811d /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-09T17:51:59.852 
INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 4 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme2n1 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new abc221fa-4eae-417c-a678-cc476a5ad3c2 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-cf5ec41b-bf72-479a-b782-4be440978a90 /dev/nvme3n1 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme3n1" successfully created. 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-cf5ec41b-bf72-479a-b782-4be440978a90" successfully created 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 ceph-cf5ec41b-bf72-479a-b782-4be440978a90 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2" created. 
2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-6/activate.monmap 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.6 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/keyring 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/ 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 6 --monmap /var/lib/ceph/osd/ceph-6/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-6/ --osd-uuid abc221fa-4eae-417c-a678-cc476a5ad3c2 --setuser ceph --setgroup ceph 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:54.826+0000 7ff029d05740 -1 bluestore(/var/lib/ceph/osd/ceph-6//block) No valid bdev label found 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: 2026-03-09T17:51:55.094+0000 7ff029d05740 -1 bluestore(/var/lib/ceph/osd/ceph-6/) _read_fsid unparsable uuid 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme3n1 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-cf5ec41b-bf72-479a-b782-4be440978a90/osd-block-abc221fa-4eae-417c-a678-cc476a5ad3c2 /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-09T17:51:59.852 
INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 6 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme3n1 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new f242d1d0-8ade-4644-adb3-addcc05baccf 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-26af5c46-83ed-4da6-a11e-2f98d1bb9ec6 /dev/vdb 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: Failed to read lvm info for /dev/vdb PVID Iy6rn9q4nJ3CgbkBWRFvhGo8I9zVqvKL. 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Was unable to complete a new OSD, will rollback changes 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.8 --yes-i-really-mean-it 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr stderr: purged osd.8 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> No OSD identified by "8" was found among LVM-based OSDs. 2026-03-09T17:51:59.852 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr --> Proceeding to check RAW-based OSDs. 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr No OSD were found. 
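[annotation] Everything from "passed data devices: 8 physical, 0 LVM" down to this point is the same ceph-volume stderr again, replayed inside the mgr's "RuntimeError: cephadm exited with an error code: 1" message, so the failure appears twice in the journal. As the log lines below show, the monitor then raises CEPHADM_APPLY_SPEC_FAIL for the osd.all-available-devices spec (the service typically created with `ceph orch apply osd --all-available-devices`), the osdmap reports 8 total, 0 up, 8 in, and teuthology starts polling `cephadm shell -- ceph osd stat -f json`, which keeps returning "num_up_osds":0. A rough sketch of what that polling loop amounts to (an assumption about its shape, not teuthology's actual code):

#!/usr/bin/env python3
# wait_for_osds_up.py (sketch of the polling visible below)
"""Keep running `ceph osd stat -f json` until every OSD reports up, or give
up after a timeout. The JSON fields match the output captured in this log."""
import json
import subprocess
import time

CMD = ["sudo", "ceph", "osd", "stat", "-f", "json"]  # the log runs this via `cephadm shell`

def wait_for_osds_up(timeout: float = 600.0, interval: float = 1.0) -> bool:
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.run(CMD, capture_output=True, text=True, check=True).stdout
        stat = json.loads(out)
        if stat["num_osds"] > 0 and stat["num_up_osds"] == stat["num_osds"]:
            return True
        time.sleep(interval)
    return False

if __name__ == "__main__":
    print("all OSDs up" if wait_for_osds_up() else "timed out waiting for OSDs")

In this run num_up_osds never reaches num_osds, which is why the `ceph osd stat -f json` calls keep repeating in the lines that follow.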
2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Traceback (most recent call last): 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: exec(code, run_globals) 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpz_y57gwz:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpfrqpns35:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2026691895' entity='client.bootstrap-osd' cmd=[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]: dispatch 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2026691895' entity='client.bootstrap-osd' cmd='[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]': finished 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: osdmap e17: 8 total, 0 up, 8 in 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/3809009406' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:51:59.853 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:51:59 vm06 ceph-mon[53878]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL) 2026-03-09T17:51:59.853 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:00.011 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:00.221 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:00.262 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:00.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:00 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1406014539' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:00.846 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:00 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1406014539' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:01.263 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:01.431 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:01.546 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:01 vm06 ceph-mon[53878]: pgmap v26: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:01.662 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:01.720 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:01.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:01 vm09 ceph-mon[62586]: pgmap v26: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:02.482 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:02 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3206497718' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:02.720 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:02.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:02 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/3206497718' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:02.877 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:03.093 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:03.138 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:03 vm09 ceph-mon[62586]: pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:03 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4275983343' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:03.846 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:03 vm06 ceph-mon[53878]: pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:03.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:03 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4275983343' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:04.139 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:04.298 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:04.506 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:04.568 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:04 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2882589237' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:04.846 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:04 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2882589237' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:05.569 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:05.725 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:05.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:05 vm09 ceph-mon[62586]: pgmap v28: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:05.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:05 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:52:05.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:05 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:52:05.839 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:05 vm06 ceph-mon[53878]: pgmap v28: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:05.839 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:05 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:52:05.839 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:05 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:52:05.935 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:05.998 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:06.999 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:07.020 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:06 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3115863123' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:07.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:06 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3115863123' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:07.168 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:07.397 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:07.458 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:08.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:07 vm09 ceph-mon[62586]: pgmap v29: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:08.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:07 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/3686228393' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:08.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:07 vm06 ceph-mon[53878]: pgmap v29: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:08.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:07 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3686228393' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:08.459 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:08.621 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:08.845 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:08.889 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:09.890 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:10.047 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:10.068 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:09 vm06 ceph-mon[53878]: pgmap v30: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:10.068 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:09 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4015830111' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:10.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:09 vm09 ceph-mon[62586]: pgmap v30: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:10.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:09 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4015830111' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:10.267 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:10.315 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:11.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:10 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3026525276' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:11.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:10 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/3026525276' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:11.315 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:11.473 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:11.675 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:11.717 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:12.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:11 vm09 ceph-mon[62586]: pgmap v31: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:12.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:11 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2115785538' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:12.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:11 vm06 ceph-mon[53878]: pgmap v31: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:12.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:11 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2115785538' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:12.718 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:12.892 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:13.115 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:13.160 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:14.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:13 vm09 ceph-mon[62586]: pgmap v32: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:14.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:13 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1562155151' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:14.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:13 vm06 ceph-mon[53878]: pgmap v32: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:14.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:13 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/1562155151' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:14.161 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:14.320 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:14.526 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:14.572 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:15.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:14 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3645801419' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:15.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:14 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3645801419' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:15.573 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:15.741 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:15.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:15 vm06 ceph-mon[53878]: pgmap v33: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:15.955 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:16.001 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:16.084 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:15 vm09 ceph-mon[62586]: pgmap v33: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:17.002 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:17.025 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:16 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2068657166' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:17.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:16 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/2068657166' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:17.165 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:17.387 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:17.435 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:18.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:17 vm09 ceph-mon[62586]: pgmap v34: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:18.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:17 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3674558428' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:18.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:17 vm06 ceph-mon[53878]: pgmap v34: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:18.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:17 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3674558428' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:18.436 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:18.598 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:18.815 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:18.866 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:19.867 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:20.038 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:20.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:19 vm06 ceph-mon[53878]: pgmap v35: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:20.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:19 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2007733031' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:20.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:19 vm09 ceph-mon[62586]: pgmap v35: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:20.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:19 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/2007733031' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:20.258 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:20.313 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:21.084 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:20 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2777640393' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:21.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:20 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:52:21.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:20 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2777640393' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:21.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:20 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:52:21.314 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:21.474 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:21.692 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:21.736 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:21.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:21 vm06 ceph-mon[53878]: pgmap v36: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:21.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:21 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1438568164' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:22.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:21 vm09 ceph-mon[62586]: pgmap v36: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:22.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:21 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/1438568164' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:22.737 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:22.895 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:23.108 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:23.168 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:24.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:23 vm09 ceph-mon[62586]: pgmap v37: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:24.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:23 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2547451254' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:24.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:23 vm06 ceph-mon[53878]: pgmap v37: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:24.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:23 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2547451254' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:24.169 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:24.329 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:24.534 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:24.576 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:24.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:24 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2402170554' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:25.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:24 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/2402170554' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:25.577 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:25.744 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:25.859 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:25 vm06 ceph-mon[53878]: pgmap v38: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:25.957 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:26.017 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:26.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:25 vm09 ceph-mon[62586]: pgmap v38: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:27.018 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:27.041 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:26 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/808150864' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:27.084 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:26 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/808150864' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:27.179 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:27.390 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:27.449 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:28.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:27 vm09 ceph-mon[62586]: pgmap v39: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:28.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:27 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2127874307' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:28.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:27 vm06 ceph-mon[53878]: pgmap v39: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:28.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:27 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2127874307' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:28.450 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:28.607 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:28.818 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:28.864 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:29.864 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:30.028 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:30.051 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:29 vm06 ceph-mon[53878]: pgmap v40: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:30.051 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:29 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1768079908' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:30.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:29 vm09 ceph-mon[62586]: pgmap v40: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:30.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:29 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1768079908' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:30.243 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:30.309 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:31.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:30 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/920179097' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:31.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:30 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/920179097' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:31.309 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:31.473 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:31.687 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:31.753 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:31.846 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:31 vm06 ceph-mon[53878]: pgmap v41: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:31.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:31 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2619057534' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:32.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:31 vm09 ceph-mon[62586]: pgmap v41: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:32.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:31 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2619057534' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:32.753 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:32.919 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:33.133 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:33.458 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:33.812 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:33 vm06 ceph-mon[53878]: pgmap v42: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:33.812 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:33 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1189443350' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:34.084 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:33 vm09 ceph-mon[62586]: pgmap v42: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:34.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:33 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/1189443350' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:34.459 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:34.618 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:34.834 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:34.892 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:35.893 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:36.057 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:36.079 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:35 vm06 ceph-mon[53878]: pgmap v43: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:36.079 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:35 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1656872220' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:36.079 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:52:36.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:35 vm09 ceph-mon[62586]: pgmap v43: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:36.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:35 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1656872220' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:36.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:35 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:52:36.273 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:36.341 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:36 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3467774299' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:37.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:36 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/3467774299' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:37.341 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:37.512 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:37.725 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:37.771 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:37.829 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:37 vm06 ceph-mon[53878]: pgmap v44: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:37.829 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:37 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/342766154' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:38.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:37 vm09 ceph-mon[62586]: pgmap v44: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:38.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:37 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/342766154' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:38.771 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:38.943 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:39.205 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:39.278 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:40.279 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:40.303 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:39 vm06 ceph-mon[53878]: pgmap v45: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:40.303 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:39 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3373314229' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:40.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:39 vm09 ceph-mon[62586]: pgmap v45: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:40.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:39 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/3373314229' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:40.456 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:40.666 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:40.712 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:41.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:40 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1506970900' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:41.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:40 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1506970900' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:41.712 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:41.879 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:41.994 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:41 vm06 ceph-mon[53878]: pgmap v46: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:42.240 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:42.291 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:42.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:41 vm09 ceph-mon[62586]: pgmap v46: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:43.292 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:43.319 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:42 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/853629483' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:43.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:43 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/853629483' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:43.473 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:43.830 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:43.892 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:44.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:44 vm06 ceph-mon[53878]: pgmap v47: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:44.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:44 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/3074225721' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:44.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:44 vm09 ceph-mon[62586]: pgmap v47: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:44.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:44 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3074225721' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:44.893 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:45.062 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:45.268 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:45.331 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:46.331 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:46.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:46 vm09 ceph-mon[62586]: pgmap v48: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:46.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:46 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2057213081' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:46.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:46 vm06 ceph-mon[53878]: pgmap v48: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:46.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:46 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2057213081' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:46.492 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:46.712 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:46.775 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:47.010 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:47 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2153848562' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:47.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:47 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/2153848562' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:47.776 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:47.939 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:48.054 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:48 vm06 ceph-mon[53878]: pgmap v49: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:48.160 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:48.206 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:48.334 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:48 vm09 ceph-mon[62586]: pgmap v49: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:49.206 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:49.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:49 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2144905938' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:49.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:49 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2144905938' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:49.368 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:49.574 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:49.637 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:50.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:50 vm09 ceph-mon[62586]: pgmap v50: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:50.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:50 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2930358164' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:50.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:50 vm06 ceph-mon[53878]: pgmap v50: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:50.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:50 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2930358164' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:50.637 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:50.809 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:51.033 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:51.076 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:51.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:51 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:52:51.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:51 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:52:52.077 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:52.244 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:52.285 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:52 vm06 ceph-mon[53878]: pgmap v51: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:52.285 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:52 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1773195214' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:52.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:52 vm09 ceph-mon[62586]: pgmap v51: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:52.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:52 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1773195214' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:52.456 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:52.501 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:53.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:53 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/848924114' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:53.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:53 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/848924114' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:53.502 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:53.669 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:53.895 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:53.967 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:54.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:54 vm09 ceph-mon[62586]: pgmap v52: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:54.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:54 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4265420541' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:54.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:54 vm06 ceph-mon[53878]: pgmap v52: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:54.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:54 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4265420541' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:54.967 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:55.142 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:55.362 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:55.418 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:56.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:56 vm09 ceph-mon[62586]: pgmap v53: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:56.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:56 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1083963640' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:56.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:56 vm06 ceph-mon[53878]: pgmap v53: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:56.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:56 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/1083963640' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:56.419 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:56.587 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:56.808 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:56.872 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:57.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:57 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2243173146' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:57.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:57 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2243173146' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:57.873 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:58.043 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:58.094 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:58 vm06 ceph-mon[53878]: pgmap v54: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:58.253 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:58.300 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:52:58.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:58 vm09 ceph-mon[62586]: pgmap v54: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:52:59.234 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:59 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/641549146' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:59.234 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:52:59 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:52:59.301 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:52:59.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:59 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/641549146' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:52:59.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:52:59 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:52:59.515 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:52:59.763 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:52:59.839 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:00.064 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:00 vm06 ceph-mon[53878]: pgmap v55: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:00.064 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:00 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:53:00.064 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:00 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:53:00.064 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:00 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/85231610' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:00.064 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:00 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:53:00.064 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:00 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:53:00.324 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:00 vm09 ceph-mon[62586]: pgmap v55: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:00.324 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:00 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:53:00.324 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:00 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:53:00.324 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:00 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/85231610' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:00.324 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:00 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:53:00.324 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:00 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:53:00.840 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:01.030 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:01.305 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:01.305 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:53:01.305 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T17:53:01.305 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:53:01.305 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:01 vm06 ceph-mon[53878]: pgmap v56: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:01.305 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T17:53:01.305 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:53:01.305 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:53:01.305 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:53:01.305 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:53:01.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:53:01.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T17:53:01.335 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:53:01.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:01 vm09 ceph-mon[62586]: pgmap v56: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:01.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T17:53:01.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:53:01.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:53:01.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:53:01.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:53:01.361 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:02 vm09 ceph-mon[62586]: Health check cleared: CEPHADM_APPLY_SPEC_FAIL (was: Failed to apply 1 service(s): osd.all-available-devices) 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:02 vm09 ceph-mon[62586]: Cluster is now healthy 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:02 vm09 ceph-mon[62586]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_id: all-available-devices 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_name: osd.all-available-devices 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: placement: 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: host_pattern: '*' 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: spec: 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: data_devices: 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: all: true 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: filter_logic: AND 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: objectstore: bluestore 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e 
CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm06 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp9sgi0i06:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp8zsbnmye:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:53:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance.main() 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-09T17:53:02.336 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: 
/usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm06 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp9sgi0i06:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp8zsbnmye:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:53:02.336 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-09T17:53:02.336 INFO:journalctl@ceph.mon.vm09.vm09.stdout: if self._apply_service(spec): 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in _apply_service 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return future.result(timeout) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.__get_result() 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise self._exception 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return await gather(*futures) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret_msg = await self.create_single_host( 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise RuntimeError( 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm06 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v 
/run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp9sgi0i06:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp8zsbnmye:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance.main() 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-09T17:53:02.337 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-09T17:53:02.337 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 
2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm06 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp9sgi0i06:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp8zsbnmye:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:02 vm09 ceph-mon[62586]: pgmap v57: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:02.338 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:02 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/1188030546' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:02 vm06 ceph-mon[53878]: Health check cleared: CEPHADM_APPLY_SPEC_FAIL (was: Failed to apply 1 service(s): osd.all-available-devices) 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:02 vm06 ceph-mon[53878]: Cluster is now healthy 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:02 vm06 ceph-mon[53878]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: service_id: all-available-devices 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: service_name: osd.all-available-devices 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: placement: 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: host_pattern: '*' 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: spec: 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: data_devices: 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: all: true 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: filter_logic: AND 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: objectstore: bluestore 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm06 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp9sgi0i06:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp8zsbnmye:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, 
in __init__ 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr instance.main() 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-09T17:53:02.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr 
File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 
2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Traceback (most recent call last): 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: exec(code, run_globals) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm06 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp9sgi0i06:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp8zsbnmye:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Traceback (most recent call last): 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: if self._apply_service(spec): 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in 
_apply_service 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return future.result(timeout) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return self.__get_result() 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: raise self._exception 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return await gather(*futures) 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ret_msg = await self.create_single_host( 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: raise RuntimeError( 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm06 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp9sgi0i06:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp8zsbnmye:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 
/dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:53:02.348 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr instance.main() 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 
2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 
2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Traceback (most recent call last): 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: exec(code, run_globals) 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm06 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp9sgi0i06:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp8zsbnmye:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:02 vm06 ceph-mon[53878]: pgmap v57: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:02.349 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:02 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/1188030546' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:02.362 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:02.531 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:02.759 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:02.830 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:03.073 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:03 vm06 ceph-mon[53878]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL) 2026-03-09T17:53:03.073 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:03 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3501644198' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:03.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:03 vm09 ceph-mon[62586]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL) 2026-03-09T17:53:03.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:03 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3501644198' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:03.831 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:03.994 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:04.111 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:04 vm06 ceph-mon[53878]: pgmap v58: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:04.206 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:04.268 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:04.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:04 vm09 ceph-mon[62586]: pgmap v58: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:05.269 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:05.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:05 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3373603617' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:05.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:05 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/3373603617' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:05.443 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:05.658 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:05.706 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:06.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:06 vm06 ceph-mon[53878]: pgmap v59: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:06.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:06 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:53:06.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:06 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2063515969' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:06.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:06 vm09 ceph-mon[62586]: pgmap v59: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:06.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:06 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:53:06.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:06 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2063515969' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:06.706 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:06.866 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:07.083 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:07.145 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:07.255 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:07 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3144161621' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:07.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:07 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/3144161621' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:08.146 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:08.308 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:08.354 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:08 vm06 ceph-mon[53878]: pgmap v60: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:08.528 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:08.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:08 vm09 ceph-mon[62586]: pgmap v60: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:08.592 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:09.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:09 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1538615016' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:09.593 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:09.596 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:09 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1538615016' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:09.757 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:09.975 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:10.041 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:10.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:10 vm06 ceph-mon[53878]: pgmap v61: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:10.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:10 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4265172048' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:10.481 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:10 vm09 ceph-mon[62586]: pgmap v61: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:10.481 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:10 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/4265172048' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:11.041 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:11.214 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:11.424 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:11.469 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:12.470 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:12.493 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:12 vm06 ceph-mon[53878]: pgmap v62: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:12.493 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:12 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3545911608' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:12.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:12 vm09 ceph-mon[62586]: pgmap v62: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:12.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:12 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3545911608' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:12.636 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:12.848 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:12.894 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:13.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:13 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1959673783' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:13.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:13 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/1959673783' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:13.895 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:14.065 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:14.286 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:14.348 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:14.584 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:14 vm09 ceph-mon[62586]: pgmap v63: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:14.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:14 vm06 ceph-mon[53878]: pgmap v63: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:15.349 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:15.482 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:15 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2272098235' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:15.508 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:15.530 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:15 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2272098235' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:15.737 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:15.784 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:16.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:16 vm09 ceph-mon[62586]: pgmap v64: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:16.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:16 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1568524508' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:16.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:16 vm06 ceph-mon[53878]: pgmap v64: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:16.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:16 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/1568524508' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:16.784 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:16.962 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:17.166 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:17.231 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:17.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:17 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3444229872' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:17.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:17 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3444229872' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:18.231 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:18.390 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:18.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:18 vm06 ceph-mon[53878]: pgmap v65: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:18.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:18 vm09 ceph-mon[62586]: pgmap v65: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:18.608 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:18.672 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:19.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:19 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3538731412' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:19.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:19 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/3538731412' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:19.673 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:19.834 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:20.059 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:20.127 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:20.239 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:20 vm06 ceph-mon[53878]: pgmap v66: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:20.239 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:20 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4166957264' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:20.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:20 vm09 ceph-mon[62586]: pgmap v66: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:20.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:20 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4166957264' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:21.128 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:21.299 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:21.415 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:21 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:53:21.517 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:21.579 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:21.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:21 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:53:22.580 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:22.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:22 vm09 ceph-mon[62586]: pgmap v67: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:22.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:22 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/782952623' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:22.596 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:22 vm06 ceph-mon[53878]: pgmap v67: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:22.596 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:22 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/782952623' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:22.736 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:22.941 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:23.003 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:23.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:23 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2657216115' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:23.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:23 vm09 ceph-mon[62586]: pgmap v68: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:23.596 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:23 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2657216115' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:23.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:23 vm06 ceph-mon[53878]: pgmap v68: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:24.004 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:24.165 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:24.373 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:24.434 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:24.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:24 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2545422774' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:24.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:24 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/2545422774' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:25.435 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:25.603 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:25.718 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:25 vm06 ceph-mon[53878]: pgmap v69: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:25.816 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:25.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:25 vm09 ceph-mon[62586]: pgmap v69: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:25.882 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:26.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:26 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2444410369' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:26.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:26 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2444410369' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:26.883 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:27.046 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:27.244 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:27.312 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:27.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:27 vm09 ceph-mon[62586]: pgmap v70: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:27.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:27 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4209440032' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:27.842 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:27 vm06 ceph-mon[53878]: pgmap v70: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:27.843 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:27 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/4209440032' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:28.313 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:28.475 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:28.688 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:28.751 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:28.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:28 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/861017578' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:29.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:28 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/861017578' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:29.752 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:29.934 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:30.048 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:29 vm06 ceph-mon[53878]: pgmap v71: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:30.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:29 vm09 ceph-mon[62586]: pgmap v71: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:30.148 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:30.198 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:31.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:30 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3860421746' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:31.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:30 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/3860421746' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:31.198 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:31.380 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:31.795 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:31.853 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:32.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:31 vm09 ceph-mon[62586]: pgmap v72: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:32.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:31 vm06 ceph-mon[53878]: pgmap v72: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:32.854 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:33.023 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:33.047 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:32 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/127257646' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:33.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:32 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/127257646' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:33.245 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:33.313 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:34.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:33 vm09 ceph-mon[62586]: pgmap v73: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:34.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:33 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2873040607' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:34.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:33 vm06 ceph-mon[53878]: pgmap v73: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:34.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:33 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2873040607' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:34.314 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:34.493 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:34.727 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:34.808 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:35.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:34 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2463941345' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:35.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:34 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2463941345' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:35.809 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:35.977 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:36.031 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:35 vm06 ceph-mon[53878]: pgmap v74: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:36.031 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:35 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:53:36.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:35 vm09 ceph-mon[62586]: pgmap v74: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:36.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:35 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:53:36.208 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:36.277 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:37.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:36 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2527079800' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:37.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:36 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2527079800' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:37.277 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:37.455 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:37.685 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:37.740 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:37.843 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:37 vm06 ceph-mon[53878]: pgmap v75: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:37.843 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:37 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1184749915' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:38.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:37 vm09 ceph-mon[62586]: pgmap v75: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:38.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:37 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1184749915' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:38.741 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:38.922 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:39.165 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:39.230 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:39.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:39 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1209311595' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:39.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:39 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/1209311595' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:40.231 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:40.411 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:40.483 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:40 vm09 ceph-mon[62586]: pgmap v76: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:40.528 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:40 vm06 ceph-mon[53878]: pgmap v76: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:40.795 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:40.868 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:41.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:41 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/62279440' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:41.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:41 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/62279440' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:41.869 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:42.039 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:42.271 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:42.355 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:42.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:42 vm09 ceph-mon[62586]: pgmap v77: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:42.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:42 vm06 ceph-mon[53878]: pgmap v77: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:43.356 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:43.542 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:43.568 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:43 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/859907871' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:43.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:43 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/859907871' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:43.780 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:43.849 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:44.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:44 vm09 ceph-mon[62586]: pgmap v78: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:44.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:44 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/237227991' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:44.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:44 vm06 ceph-mon[53878]: pgmap v78: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:44.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:44 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/237227991' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:44.850 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:45.018 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:45.238 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:45.289 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:45.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:45 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1509229473' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:45.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:45 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1509229473' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:46.290 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:46.456 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:46.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:46 vm06 ceph-mon[53878]: pgmap v79: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:46.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:46 vm09 ceph-mon[62586]: pgmap v79: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:46.667 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:46.716 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:47.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:47 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/3491401676' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:47.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:47 vm09 ceph-mon[62586]: pgmap v80: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:47.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:47 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3491401676' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:47.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:47 vm06 ceph-mon[53878]: pgmap v80: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:47.718 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:47.877 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:48.083 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:48.128 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:48.256 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:48 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/852754438' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:48.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:48 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/852754438' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:49.129 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:49.294 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:49.408 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:49 vm06 ceph-mon[53878]: pgmap v81: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:49.496 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:49.543 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:49.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:49 vm09 ceph-mon[62586]: pgmap v81: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:50.544 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:50.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:50 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/146847667' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:50.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:50 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/146847667' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:50.706 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:50.922 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:50.988 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:51.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:51 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:53:51.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:51 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4247557299' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:51.585 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:51 vm09 ceph-mon[62586]: pgmap v82: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:51.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:51 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:53:51.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:51 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4247557299' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:51.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:51 vm06 ceph-mon[53878]: pgmap v82: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:51.989 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:52.154 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:52.378 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:52.449 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:52.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:52 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2168788476' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:52.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:52 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/2168788476' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:53.449 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:53.613 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:53.689 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:53 vm06 ceph-mon[53878]: pgmap v83: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:53.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:53 vm09 ceph-mon[62586]: pgmap v83: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:53.836 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:53.894 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:54.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:54 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2992079580' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:54.846 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:54 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2992079580' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:54.895 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:55.048 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:55.257 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:55.299 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:55.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:55 vm06 ceph-mon[53878]: pgmap v84: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:55.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:55 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3290794261' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:55 vm09 ceph-mon[62586]: pgmap v84: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:55.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:55 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/3290794261' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:56.300 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:56.456 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:56.659 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:56.721 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:57.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:56 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3253457498' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:57.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:56 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3253457498' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:57.721 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:57.885 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:58.002 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:57 vm06 ceph-mon[53878]: pgmap v85: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:58.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:57 vm09 ceph-mon[62586]: pgmap v85: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:58.094 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:58.159 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:59.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:58 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2930696046' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:59.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:58 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2930696046' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:53:59.160 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:53:59.313 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:53:59.516 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:53:59.577 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:53:59.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:59 vm06 ceph-mon[53878]: pgmap v86: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:53:59.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:53:59 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1379031170' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:00.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:59 vm09 ceph-mon[62586]: pgmap v86: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:00.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:53:59 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1379031170' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:00.578 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:00.734 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:00.928 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:00.990 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:01.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:00 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4083121092' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:01.347 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:00 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/4083121092' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:01.991 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:02.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:01 vm06 ceph-mon[53878]: pgmap v87: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:02.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:54:02.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:54:02.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:01 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:54:02.178 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:01 vm09 ceph-mon[62586]: pgmap v87: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T17:54:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:54:02.335 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:01 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:54:02.412 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:02.464 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:03.465 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:03.670 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2127983701' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: pgmap v88: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:54:03.693 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:03 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/2127983701' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: pgmap v88: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T17:54:03.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:03 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T17:54:03.882 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:03.927 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:04 vm09 ceph-mon[62586]: Health check cleared: CEPHADM_APPLY_SPEC_FAIL (was: Failed to apply 1 service(s): osd.all-available-devices) 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:04 vm09 ceph-mon[62586]: Cluster is now healthy 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:04 vm09 ceph-mon[62586]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_id: all-available-devices 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_name: osd.all-available-devices 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: placement: 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: host_pattern: '*' 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: spec: 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: data_devices: 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: all: true 
2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: filter_logic: AND 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: objectstore: bluestore 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm09/config 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpyrj3jwyg:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp3z2qeio3:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-09T17:54:04.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance.main() 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: 
/usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File 
"/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v 
/run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpyrj3jwyg:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp3z2qeio3:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: if self._apply_service(spec): 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in _apply_service 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return future.result(timeout) 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.__get_result() 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T17:54:04.836 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise self._exception 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return await gather(*futures) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret_msg = await self.create_single_host( 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise RuntimeError( 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm09/config 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host 
--entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpyrj3jwyg:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp3z2qeio3:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance.main() 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 
281, in __init__ 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-09T17:54:04.837 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpyrj3jwyg:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp3z2qeio3:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 
2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:04 vm09 ceph-mon[62586]: pgmap v89: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:04.837 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:04 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2188032077' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:04 vm06 ceph-mon[53878]: Health check cleared: CEPHADM_APPLY_SPEC_FAIL (was: Failed to apply 1 service(s): osd.all-available-devices) 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:04 vm06 ceph-mon[53878]: Cluster is now healthy 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:04 vm06 ceph-mon[53878]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: service_id: all-available-devices 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: service_name: osd.all-available-devices 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: placement: 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: host_pattern: '*' 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: spec: 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: data_devices: 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: all: true 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: filter_logic: AND 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: objectstore: bluestore 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm09/config 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpyrj3jwyg:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp3z2qeio3:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-09T17:54:04.847 
INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr instance.main() 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-09T17:54:04.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr 
File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 
2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Traceback (most recent call last): 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return _run_code(code, main_globals, None, 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: exec(code, run_globals) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpyrj3jwyg:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp3z2qeio3:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Traceback (most recent call last): 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: if self._apply_service(spec): 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in 
_apply_service 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return future.result(timeout) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return self.__get_result() 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: raise self._exception 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return await gather(*futures) 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ret_msg = await self.create_single_host( 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: raise RuntimeError( 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm09/config 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpyrj3jwyg:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp3z2qeio3:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 
/dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-09T17:54:04.848 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr instance.main() 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Traceback (most recent call last):
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: return _run_code(code, main_globals, None,
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: exec(code, run_globals)
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 409, in _infer_config
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 324, in _infer_fsid
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 437, in _infer_image
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 311, in _validate_fsid
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3325, in command_ceph_volume
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 310, in call_throws
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/run/ceph:z -v /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44:/var/log/ceph:z -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpyrj3jwyg:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp3z2qeio3:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:04 vm06 ceph-mon[53878]: pgmap v89: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T17:54:04.849 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:04 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2188032077' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-09T17:54:04.929 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json
2026-03-09T17:54:05.082 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config
2026-03-09T17:54:05.290 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-09T17:54:05.349 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0}
2026-03-09T17:54:05.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:05 vm06 ceph-mon[53878]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL)
2026-03-09T17:54:05.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:05 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1328055893' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-09T17:54:05.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:05 vm09 ceph-mon[62586]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL)
2026-03-09T17:54:05.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:05 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1328055893' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-09T17:54:06.351 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json
2026-03-09T17:54:06.513 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config
2026-03-09T17:54:06.626 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:06 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T17:54:06.626 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:06 vm06 ceph-mon[53878]: pgmap v90: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T17:54:06.718 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-09T17:54:06.758 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0}
2026-03-09T17:54:06.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:06 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T17:54:06.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:06 vm09 ceph-mon[62586]: pgmap v90: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T17:54:07.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:07 vm06 ceph-mon[53878]: from='client.?
192.168.123.106:0/3155977794' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:07.759 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:07.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:07 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3155977794' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:07.918 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:08.131 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:08.181 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:08.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:08 vm09 ceph-mon[62586]: pgmap v91: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:08.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:08 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3958974194' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:08.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:08 vm06 ceph-mon[53878]: pgmap v91: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:08.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:08 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3958974194' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:09.182 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:09.341 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:09.547 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:09.610 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:09.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:09 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3024237065' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:10.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:09 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/3024237065' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:10.611 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:10.768 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:10.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:10 vm09 ceph-mon[62586]: pgmap v92: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:10.884 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:10 vm06 ceph-mon[53878]: pgmap v92: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:10.973 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:11.034 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:12.036 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:12.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:11 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4124177555' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:12.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:11 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4124177555' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:12.199 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:12.406 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:12.469 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:13.084 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:12 vm09 ceph-mon[62586]: pgmap v93: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:13.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:12 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3455732136' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:13.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:12 vm06 ceph-mon[53878]: pgmap v93: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:13.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:12 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/3455732136' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:13.469 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:13.636 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:13.859 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:13.925 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:14.926 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:14.946 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:14 vm06 ceph-mon[53878]: pgmap v94: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:14.946 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:14 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/669236220' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:15.083 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:15.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:14 vm09 ceph-mon[62586]: pgmap v94: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:15.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:14 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/669236220' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:15.298 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:15.343 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:15.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:15 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2562662136' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:16.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:15 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2562662136' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:16.343 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:16.499 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:16.710 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:16.771 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:17.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:16 vm09 ceph-mon[62586]: pgmap v95: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:17.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:16 vm06 ceph-mon[53878]: pgmap v95: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:17.771 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:17.939 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:17.960 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:17 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/3035371345' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:18.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:17 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/3035371345' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:18.148 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:18.220 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:19.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:18 vm09 ceph-mon[62586]: pgmap v96: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:19.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:18 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2018033606' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:19.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:18 vm06 ceph-mon[53878]: pgmap v96: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:19.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:18 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2018033606' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:19.220 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:19.392 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:19.616 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:19.665 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:19.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:19 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/1720168813' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:20.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:19 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/1720168813' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:20.666 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:20.827 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:20.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:20 vm09 ceph-mon[62586]: pgmap v97: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:20.835 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:20 vm09 ceph-mon[62586]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:54:20.941 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:20 vm06 ceph-mon[53878]: pgmap v97: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:20.941 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:20 vm06 ceph-mon[53878]: from='mgr.14219 192.168.123.106:0/3629212474' entity='mgr.vm06.shmhyl' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T17:54:21.029 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:21.085 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:22.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:21 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2937319167' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:22.086 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:22.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:21 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2937319167' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:22.245 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:22.461 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:22.521 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:22.690 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:22 vm06 ceph-mon[53878]: pgmap v98: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:22.690 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:22 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/840481634' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:23.084 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:22 vm09 ceph-mon[62586]: pgmap v98: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:23.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:22 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/840481634' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:23.522 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:23.677 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:23.885 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:23.932 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:24.933 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:24.953 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:24 vm06 ceph-mon[53878]: pgmap v99: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:24.954 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:24 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4186123157' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:25.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:24 vm09 ceph-mon[62586]: pgmap v99: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:25.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:24 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/4186123157' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:25.096 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:25.323 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:25.370 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:25.834 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:25 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4061054290' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:26.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:25 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4061054290' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:26.370 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:26.524 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:26.742 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:26.809 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:27.084 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:26 vm09 ceph-mon[62586]: pgmap v100: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:27.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:26 vm06 ceph-mon[53878]: pgmap v100: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:27.810 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:27.967 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:27.988 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:27 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/2740949239' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:28.084 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:27 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2740949239' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:28.178 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:28.222 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:29.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:28 vm09 ceph-mon[62586]: pgmap v101: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:29.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:28 vm09 ceph-mon[62586]: from='client.? 
192.168.123.106:0/657533715' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:29.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:28 vm06 ceph-mon[53878]: pgmap v101: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:29.096 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:28 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/657533715' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:29.223 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:29.377 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:29.596 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:29.655 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:29.846 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:29 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/4200698641' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:30.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:29 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/4200698641' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:30.656 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:30.820 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config 2026-03-09T17:54:30.834 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:30 vm09 ceph-mon[62586]: pgmap v102: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:30.935 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:30 vm06 ceph-mon[53878]: pgmap v102: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T17:54:31.030 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T17:54:31.088 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0} 2026-03-09T17:54:32.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:31 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/2881061660' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T17:54:32.089 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json 2026-03-09T17:54:32.097 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:31 vm06 ceph-mon[53878]: from='client.? 
192.168.123.106:0/2881061660' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-09T17:54:32.252 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config
2026-03-09T17:54:32.464 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-09T17:54:32.530 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0}
2026-03-09T17:54:32.723 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:32 vm06 ceph-mon[53878]: pgmap v103: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T17:54:32.723 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:32 vm06 ceph-mon[53878]: from='client.? 192.168.123.106:0/523664561' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-09T17:54:33.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:32 vm09 ceph-mon[62586]: pgmap v103: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-09T17:54:33.085 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:32 vm09 ceph-mon[62586]: from='client.? 192.168.123.106:0/523664561' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-09T17:54:33.531 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph osd stat -f json
2026-03-09T17:54:33.702 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config
2026-03-09T17:54:33.909 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-09T17:54:33.957 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773078717,"num_remapped_pgs":0}
2026-03-09T17:54:33.957 ERROR:teuthology.contextutil:Saw exception from nested tasks
Traceback (most recent call last):
  File "/home/teuthos/teuthology/teuthology/contextutil.py", line 30, in nested
    vars.append(enter())
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 135, in __enter__
    return next(self.gen)
  File "/home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks/cephadm.py", line 1140, in ceph_osds
    while proceed():
  File "/home/teuthos/teuthology/teuthology/contextutil.py", line 134, in __call__
    raise MaxWhileTries(error_msg)
teuthology.exceptions.MaxWhileTries: reached maximum tries (120) after waiting for 120 seconds
2026-03-09T17:54:33.958 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-09T17:54:33.982 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-09T17:54:34.008 INFO:tasks.cephadm:Disabling cephadm mgr module
2026-03-09T17:54:34.008 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 -- ceph mgr module disable cephadm
2026-03-09T17:54:34.180 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/mon.vm06/config
2026-03-09T17:54:34.197 INFO:teuthology.orchestra.run.vm06.stderr:Error: statfs /etc/ceph/ceph.client.admin.keyring: no such file or directory 2026-03-09T17:54:34.216 DEBUG:teuthology.orchestra.run:got remote process result: 125 2026-03-09T17:54:34.216 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-09T17:54:34.216 DEBUG:teuthology.orchestra.run.vm06:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-09T17:54:34.229 DEBUG:teuthology.orchestra.run.vm09:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-09T17:54:34.245 INFO:tasks.cephadm:Stopping all daemons... 2026-03-09T17:54:34.245 INFO:tasks.cephadm.mon.vm06:Stopping mon.vm06... 2026-03-09T17:54:34.245 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mon.vm06 2026-03-09T17:54:34.559 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:34 vm06 systemd[1]: Stopping Ceph mon.vm06 for 588d7312-1be0-11f1-b5b6-61233c7d7c44... 2026-03-09T17:54:34.559 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:34 vm06 ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44-mon-vm06[53874]: 2026-03-09T17:54:34.365+0000 7f5a2f369640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm06 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T17:54:34.559 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 09 17:54:34 vm06 ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44-mon-vm06[53874]: 2026-03-09T17:54:34.365+0000 7f5a2f369640 -1 mon.vm06@0(leader) e2 *** Got Signal Terminated *** 2026-03-09T17:54:34.638 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mon.vm06.service' 2026-03-09T17:54:34.668 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T17:54:34.668 INFO:tasks.cephadm.mon.vm06:Stopped mon.vm06 2026-03-09T17:54:34.668 INFO:tasks.cephadm.mon.vm09:Stopping mon.vm09... 2026-03-09T17:54:34.669 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mon.vm09 2026-03-09T17:54:35.030 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:34 vm09 systemd[1]: Stopping Ceph mon.vm09 for 588d7312-1be0-11f1-b5b6-61233c7d7c44... 
2026-03-09T17:54:35.030 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:34 vm09 ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44-mon-vm09[62582]: 2026-03-09T17:54:34.816+0000 7fba8d4bf640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm09 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T17:54:35.030 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:34 vm09 ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44-mon-vm09[62582]: 2026-03-09T17:54:34.816+0000 7fba8d4bf640 -1 mon.vm09@1(peon) e2 *** Got Signal Terminated *** 2026-03-09T17:54:35.030 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 09 17:54:35 vm09 podman[69982]: 2026-03-09 17:54:35.03057347 +0000 UTC m=+0.231076417 container died 61bd85142c0cb05badb4492e801805f01f6fe641688eebf6088b96921ed2f9b2 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44-mon-vm09, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.vendor=CentOS) 2026-03-09T17:54:35.100 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-588d7312-1be0-11f1-b5b6-61233c7d7c44@mon.vm09.service' 2026-03-09T17:54:35.130 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T17:54:35.130 INFO:tasks.cephadm.mon.vm09:Stopped mon.vm09 2026-03-09T17:54:35.130 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 --force --keep-logs 2026-03-09T17:54:35.249 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:54:38.092 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 --force --keep-logs 2026-03-09T17:54:38.216 INFO:teuthology.orchestra.run.vm09.stdout:Deleting cluster with fsid: 588d7312-1be0-11f1-b5b6-61233c7d7c44 2026-03-09T17:54:40.051 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T17:54:40.078 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T17:54:40.102 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-09T17:54:40.102 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/592/remote/vm06/crash 2026-03-09T17:54:40.102 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash -- . 
2026-03-09T17:54:40.141 INFO:teuthology.orchestra.run.vm06.stderr:tar: /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash: Cannot open: No such file or directory
2026-03-09T17:54:40.141 INFO:teuthology.orchestra.run.vm06.stderr:tar: Error is not recoverable: exiting now
2026-03-09T17:54:40.142 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/592/remote/vm09/crash
2026-03-09T17:54:40.142 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash -- .
2026-03-09T17:54:40.166 INFO:teuthology.orchestra.run.vm09.stderr:tar: /var/lib/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/crash: Cannot open: No such file or directory
2026-03-09T17:54:40.166 INFO:teuthology.orchestra.run.vm09.stderr:tar: Error is not recoverable: exiting now
2026-03-09T17:54:40.167 INFO:tasks.cephadm:Checking cluster log for badness...
2026-03-09T17:54:40.167 DEBUG:teuthology.orchestra.run.vm06:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1
2026-03-09T17:54:40.208 INFO:teuthology.orchestra.run.vm06.stdout:2026-03-09T17:51:58.948501+0000 mon.vm06 (mon.0) 492 : cluster [WRN] Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL)
2026-03-09T17:54:40.208 WARNING:tasks.cephadm:Found errors (ERR|WRN|SEC) in cluster log
2026-03-09T17:54:40.208 DEBUG:teuthology.orchestra.run.vm06:> sudo egrep '\[SEC\]' /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1
2026-03-09T17:54:40.273 DEBUG:teuthology.orchestra.run.vm06:> sudo egrep '\[ERR\]' /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1
2026-03-09T17:54:40.337 DEBUG:teuthology.orchestra.run.vm06:> sudo egrep '\[WRN\]' /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1
2026-03-09T17:54:40.402 INFO:teuthology.orchestra.run.vm06.stdout:2026-03-09T17:51:58.948501+0000 mon.vm06 (mon.0) 492 : cluster [WRN] Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL)
2026-03-09T17:54:40.402 INFO:tasks.cephadm:Compressing logs...
2026-03-09T17:54:40.402 DEBUG:teuthology.orchestra.run.vm06:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T17:54:40.444 DEBUG:teuthology.orchestra.run.vm09:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T17:54:40.463 INFO:teuthology.orchestra.run.vm06.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T17:54:40.463 INFO:teuthology.orchestra.run.vm06.stderr:‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T17:54:40.464 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mon.vm06.log 2026-03-09T17:54:40.466 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mon.vm06.log: gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.log 2026-03-09T17:54:40.466 INFO:teuthology.orchestra.run.vm09.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T17:54:40.466 INFO:teuthology.orchestra.run.vm09.stderr:‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T17:54:40.467 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-volume.log 2026-03-09T17:54:40.468 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/cephadm.log: 91.6% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T17:54:40.468 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-client.ceph-exporter.vm09.log 2026-03-09T17:54:40.469 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mgr.vm09.lvfebd.log 2026-03-09T17:54:40.469 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-volume.log: /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-client.ceph-exporter.vm09.log: 28.6% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-client.ceph-exporter.vm09.log.gz 2026-03-09T17:54:40.469 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mon.vm09.log 2026-03-09T17:54:40.471 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mgr.vm06.shmhyl.log 2026-03-09T17:54:40.471 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.log: 86.0% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.log.gz 2026-03-09T17:54:40.471 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mgr.vm09.lvfebd.log: 95.5% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-volume.log.gz 2026-03-09T17:54:40.472 INFO:teuthology.orchestra.run.vm09.stderr: 90.9% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mgr.vm09.lvfebd.log.gz 2026-03-09T17:54:40.472 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.audit.log 2026-03-09T17:54:40.472 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mon.vm09.log: gzip -5 --verbose -- 
/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.log 2026-03-09T17:54:40.473 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.audit.log: 90.3% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.audit.log.gz 2026-03-09T17:54:40.473 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.cephadm.log 2026-03-09T17:54:40.474 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.log: 86.0% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.log.gz 2026-03-09T17:54:40.475 INFO:teuthology.orchestra.run.vm06.stderr: 92.3% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T17:54:40.475 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.audit.log 2026-03-09T17:54:40.479 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.0.log 2026-03-09T17:54:40.480 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.cephadm.log: 92.7% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.cephadm.log.gz 2026-03-09T17:54:40.480 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.2.log 2026-03-09T17:54:40.484 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mgr.vm06.shmhyl.log: gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.cephadm.log 2026-03-09T17:54:40.484 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.0.log: 94.5% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.0.log.gz 2026-03-09T17:54:40.484 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.4.log 2026-03-09T17:54:40.485 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.audit.log: 90.4% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.audit.log.gz 2026-03-09T17:54:40.488 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-volume.log 2026-03-09T17:54:40.488 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.2.log: 94.4% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.2.log.gz 2026-03-09T17:54:40.488 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.6.log 2026-03-09T17:54:40.490 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.cephadm.log: 92.0% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph.cephadm.log.gz 2026-03-09T17:54:40.492 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.4.log: 94.5% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.4.log.gz 2026-03-09T17:54:40.496 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-client.ceph-exporter.vm06.log 2026-03-09T17:54:40.496 
INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.6.log: 94.5% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.6.log.gz 2026-03-09T17:54:40.498 INFO:teuthology.orchestra.run.vm09.stderr: 92.5% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mon.vm09.log.gz 2026-03-09T17:54:40.499 INFO:teuthology.orchestra.run.vm09.stderr: 2026-03-09T17:54:40.500 INFO:teuthology.orchestra.run.vm09.stderr:real 0m0.043s 2026-03-09T17:54:40.500 INFO:teuthology.orchestra.run.vm09.stderr:user 0m0.050s 2026-03-09T17:54:40.500 INFO:teuthology.orchestra.run.vm09.stderr:sys 0m0.019s 2026-03-09T17:54:40.502 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-volume.log: 95.5% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-volume.log.gz 2026-03-09T17:54:40.504 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.1.log 2026-03-09T17:54:40.504 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-client.ceph-exporter.vm06.log: 90.8% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-client.ceph-exporter.vm06.log.gz 2026-03-09T17:54:40.508 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.3.log 2026-03-09T17:54:40.515 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.1.log: 94.5% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.1.log.gz 2026-03-09T17:54:40.516 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.5.log 2026-03-09T17:54:40.524 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.3.log: 94.5% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.3.log.gz 2026-03-09T17:54:40.525 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.7.log 2026-03-09T17:54:40.528 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.5.log: 94.6% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.5.log.gz 2026-03-09T17:54:40.532 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.7.log: 91.0% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mgr.vm06.shmhyl.log.gz 2026-03-09T17:54:40.535 INFO:teuthology.orchestra.run.vm06.stderr: 94.5% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-osd.7.log.gz 2026-03-09T17:54:40.588 INFO:teuthology.orchestra.run.vm06.stderr: 90.4% -- replaced with /var/log/ceph/588d7312-1be0-11f1-b5b6-61233c7d7c44/ceph-mon.vm06.log.gz 2026-03-09T17:54:40.590 INFO:teuthology.orchestra.run.vm06.stderr: 2026-03-09T17:54:40.590 INFO:teuthology.orchestra.run.vm06.stderr:real 0m0.135s 2026-03-09T17:54:40.590 INFO:teuthology.orchestra.run.vm06.stderr:user 0m0.189s 2026-03-09T17:54:40.590 INFO:teuthology.orchestra.run.vm06.stderr:sys 0m0.019s 2026-03-09T17:54:40.590 INFO:tasks.cephadm:Archiving logs... 
2026-03-09T17:54:40.590 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/log/ceph to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/592/remote/vm06/log
2026-03-09T17:54:40.590 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-09T17:54:40.665 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/log/ceph to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/592/remote/vm09/log
2026-03-09T17:54:40.665 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-09T17:54:40.691 INFO:tasks.cephadm:Removing cluster...
2026-03-09T17:54:40.691 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 --force
2026-03-09T17:54:40.820 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: 588d7312-1be0-11f1-b5b6-61233c7d7c44
2026-03-09T17:54:40.903 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 588d7312-1be0-11f1-b5b6-61233c7d7c44 --force
2026-03-09T17:54:41.022 INFO:teuthology.orchestra.run.vm09.stdout:Deleting cluster with fsid: 588d7312-1be0-11f1-b5b6-61233c7d7c44
2026-03-09T17:54:41.104 INFO:tasks.cephadm:Removing cephadm ...
2026-03-09T17:54:41.104 DEBUG:teuthology.orchestra.run.vm06:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-09T17:54:41.118 DEBUG:teuthology.orchestra.run.vm09:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-09T17:54:41.132 INFO:tasks.cephadm:Teardown complete
2026-03-09T17:54:41.132 ERROR:teuthology.run_tasks:Saw exception from tasks.
Traceback (most recent call last):
  File "/home/teuthos/teuthology/teuthology/run_tasks.py", line 112, in run_tasks
    manager.__enter__()
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 135, in __enter__
    return next(self.gen)
  File "/home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks/cephadm.py", line 2216, in task
    with contextutil.nested(
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 135, in __enter__
    return next(self.gen)
  File "/home/teuthos/teuthology/teuthology/contextutil.py", line 54, in nested
    raise exc[1]
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 153, in __exit__
    self.gen.throw(typ, value, traceback)
  File "/home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks/cephadm.py", line 1845, in initialize_config
    yield
  File "/home/teuthos/teuthology/teuthology/contextutil.py", line 30, in nested
    vars.append(enter())
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 135, in __enter__
    return next(self.gen)
  File "/home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks/cephadm.py", line 1140, in ceph_osds
    while proceed():
  File "/home/teuthos/teuthology/teuthology/contextutil.py", line 134, in __call__
    raise MaxWhileTries(error_msg)
teuthology.exceptions.MaxWhileTries: reached maximum tries (120) after waiting for 120 seconds
2026-03-09T17:54:41.132 DEBUG:teuthology.run_tasks:Unwinding manager cephadm
2026-03-09T17:54:41.135 DEBUG:teuthology.run_tasks:Unwinding manager nvme_loop
2026-03-09T17:54:41.136 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm06:/dev/vdb...
2026-03-09T17:54:41.137 DEBUG:teuthology.orchestra.run.vm06:> sudo nvme disconnect -n vdb 2026-03-09T17:54:41.249 INFO:teuthology.orchestra.run.vm06.stdout:NQN:vdb disconnected 1 controller(s) 2026-03-09T17:54:41.251 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm06:/dev/vdc... 2026-03-09T17:54:41.251 DEBUG:teuthology.orchestra.run.vm06:> sudo nvme disconnect -n vdc 2026-03-09T17:54:41.346 INFO:teuthology.orchestra.run.vm06.stdout:NQN:vdc disconnected 1 controller(s) 2026-03-09T17:54:41.347 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm06:/dev/vdd... 2026-03-09T17:54:41.347 DEBUG:teuthology.orchestra.run.vm06:> sudo nvme disconnect -n vdd 2026-03-09T17:54:41.431 INFO:teuthology.orchestra.run.vm06.stdout:NQN:vdd disconnected 1 controller(s) 2026-03-09T17:54:41.432 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm06:/dev/vde... 2026-03-09T17:54:41.432 DEBUG:teuthology.orchestra.run.vm06:> sudo nvme disconnect -n vde 2026-03-09T17:54:41.522 INFO:teuthology.orchestra.run.vm06.stdout:NQN:vde disconnected 1 controller(s) 2026-03-09T17:54:41.523 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T17:54:41.523 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/scratch_devs 2026-03-09T17:54:41.544 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vdb... 2026-03-09T17:54:41.544 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n vdb 2026-03-09T17:54:41.646 INFO:teuthology.orchestra.run.vm09.stdout:NQN:vdb disconnected 1 controller(s) 2026-03-09T17:54:41.647 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vdc... 2026-03-09T17:54:41.647 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n vdc 2026-03-09T17:54:41.756 INFO:teuthology.orchestra.run.vm09.stdout:NQN:vdc disconnected 1 controller(s) 2026-03-09T17:54:41.758 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vdd... 2026-03-09T17:54:41.758 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n vdd 2026-03-09T17:54:41.854 INFO:teuthology.orchestra.run.vm09.stdout:NQN:vdd disconnected 1 controller(s) 2026-03-09T17:54:41.856 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vde... 2026-03-09T17:54:41.856 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n vde 2026-03-09T17:54:41.951 INFO:teuthology.orchestra.run.vm09.stdout:NQN:vde disconnected 1 controller(s) 2026-03-09T17:54:41.953 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T17:54:41.953 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/scratch_devs 2026-03-09T17:54:41.974 DEBUG:teuthology.run_tasks:Unwinding manager clock 2026-03-09T17:54:41.976 INFO:teuthology.task.clock:Checking final clock skew... 
2026-03-09T17:54:41.976 DEBUG:teuthology.orchestra.run.vm06:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T17:54:41.978 DEBUG:teuthology.orchestra.run.vm09:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T17:54:41.990 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found
2026-03-09T17:54:42.029 INFO:teuthology.orchestra.run.vm09.stderr:bash: line 1: ntpq: command not found
2026-03-09T17:54:42.156 INFO:teuthology.orchestra.run.vm09.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm09.stdout:===============================================================================
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm09.stdout:^+ 141.84.43.73 2 6 377 47 -908us[ -908us] +/- 27ms
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm09.stdout:^+ 82.165.178.31 2 6 377 47 +1838us[+1838us] +/- 51ms
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm09.stdout:^+ node-4.infogral.is 2 6 377 47 -335us[ -335us] +/- 15ms
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm09.stdout:^* bond1-1201.fsn-lf-s02.pr> 2 6 377 48 -331us[ -421us] +/- 20ms
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm06.stdout:===============================================================================
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm06.stdout:^* node-4.infogral.is 2 6 377 48 +301us[ +124us] +/- 15ms
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm06.stdout:^+ bond1-1201.fsn-lf-s02.pr> 2 6 377 47 +376us[ +376us] +/- 20ms
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm06.stdout:^+ 141.84.43.73 2 6 377 48 -562us[ -562us] +/- 26ms
2026-03-09T17:54:42.157 INFO:teuthology.orchestra.run.vm06.stdout:^- 82.165.178.31 2 6 377 48 +2566us[+2566us] +/- 51ms
2026-03-09T17:54:42.158 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-09T17:54:42.160 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-09T17:54:42.161 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-09T17:54:42.162 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-09T17:54:42.164 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-09T17:54:42.166 INFO:teuthology.task.internal:Duration was 454.054482 seconds
2026-03-09T17:54:42.166 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-09T17:54:42.168 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-09T17:54:42.168 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T17:54:42.200 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T17:54:42.233 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T17:54:42.235 INFO:teuthology.orchestra.run.vm09.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T17:54:42.627 INFO:teuthology.task.internal.syslog:Checking logs for errors...
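The final clock-skew check probes ntpq first and falls back to chronyc when ntpd is not installed, which is why the `ntpq: command not found` lines above are harmless: the chronyc sources output that follows is the real result, and `|| true` keeps the teardown from failing either way. A small Python sketch of the same fallback chain (the function name is illustrative):

    import subprocess

    def report_clock_sources():
        # Try ntpq first, then chronyc; never fail the teardown (the shell's `|| true`).
        for cmd in (["ntpq", "-p"], ["chronyc", "sources"]):
            try:
                return subprocess.run(cmd, capture_output=True, text=True,
                                      check=True).stdout
            except (FileNotFoundError, subprocess.CalledProcessError):
                continue
        return ""  # neither tool available; report nothing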
2026-03-09T17:54:42.627 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm06.local
2026-03-09T17:54:42.628 DEBUG:teuthology.orchestra.run.vm06:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T17:54:42.691 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm09.local
2026-03-09T17:54:42.691 DEBUG:teuthology.orchestra.run.vm09:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T17:54:42.713 INFO:teuthology.task.internal.syslog:Gathering journactl...
2026-03-09T17:54:42.714 DEBUG:teuthology.orchestra.run.vm06:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T17:54:42.733 DEBUG:teuthology.orchestra.run.vm09:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T17:54:43.247 INFO:teuthology.task.internal.syslog:Compressing syslogs...
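The syslog check above selects kern.log lines containing BUG/INFO/DEADLOCK and then discards a long list of known-benign patterns; only a line that survives every filter (the `head -n 1` output) would count against the job. A Python sketch of the same filter-then-first-hit logic, with a deliberately shortened exclusion list standing in for the full grep -v chain:

    import re

    INCLUDE = re.compile(r"\b(BUG|INFO|DEADLOCK)\b")
    # Abbreviated stand-ins for the long `grep -v` chain in the log above.
    EXCLUDE = [
        re.compile(r"task .* blocked for more than .* seconds"),
        re.compile(r"lockdep is turned off"),
        re.compile(r"ceph-create-keys: INFO"),
    ]

    def first_suspicious_line(path):
        # Return the first kernel-log line matching the include pattern and
        # none of the exclusions, or None if the log looks clean.
        with open(path, errors="replace") as fh:
            for line in fh:
                if INCLUDE.search(line) and not any(p.search(line) for p in EXCLUDE):
                    return line.rstrip()
        return None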
2026-03-09T17:54:43.247 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T17:54:43.248 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T17:54:43.270 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T17:54:43.270 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T17:54:43.270 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5/home/ubuntu/cephtest/archive/syslog/kern.log: --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T17:54:43.270 INFO:teuthology.orchestra.run.vm06.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T17:54:43.270 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T17:54:43.271 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T17:54:43.271 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T17:54:43.272 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T17:54:43.272 INFO:teuthology.orchestra.run.vm09.stderr: -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T17:54:43.272 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T17:54:43.434 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.1% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T17:54:43.437 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.4% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T17:54:43.439 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-09T17:54:43.441 INFO:teuthology.task.internal:Restoring /etc/sudoers...
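Syslog compression runs one gzip per file via find | xargs with unbounded parallelism (--max-procs=0), which is why the stderr output above interleaves mid-line; the files are still compressed correctly, as the .gz replacements confirm. A sketch of the same walk-and-compress step done in-process (the directory path and compression level are taken from the log; the helper name is illustrative):

    import gzip
    import shutil
    from pathlib import Path

    def compress_logs(syslog_dir="/home/ubuntu/cephtest/archive/syslog"):
        # Replace every *.log with a .gz, equivalent to running `gzip -5` on each file.
        for log in Path(syslog_dir).glob("*.log"):
            gz = log.with_name(log.name + ".gz")
            with open(log, "rb") as src, gzip.open(gz, "wb", compresslevel=5) as dst:
                shutil.copyfileobj(src, dst)
            log.unlink()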
2026-03-09T17:54:43.441 DEBUG:teuthology.orchestra.run.vm06:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T17:54:43.499 DEBUG:teuthology.orchestra.run.vm09:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T17:54:43.525 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-09T17:54:43.527 DEBUG:teuthology.orchestra.run.vm06:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T17:54:43.541 DEBUG:teuthology.orchestra.run.vm09:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T17:54:43.566 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = core
2026-03-09T17:54:43.589 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = core
2026-03-09T17:54:43.604 DEBUG:teuthology.orchestra.run.vm06:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T17:54:43.631 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T17:54:43.631 DEBUG:teuthology.orchestra.run.vm09:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T17:54:43.658 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T17:54:43.659 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-09T17:54:43.661 INFO:teuthology.task.internal:Transferring archived files...
2026-03-09T17:54:43.661 DEBUG:teuthology.misc:Transferring archived files from vm06:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/592/remote/vm06
2026-03-09T17:54:43.661 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T17:54:43.698 DEBUG:teuthology.misc:Transferring archived files from vm09:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/592/remote/vm09
2026-03-09T17:54:43.698 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T17:54:43.725 INFO:teuthology.task.internal:Removing archive directory...
2026-03-09T17:54:43.725 DEBUG:teuthology.orchestra.run.vm06:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T17:54:43.740 DEBUG:teuthology.orchestra.run.vm09:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T17:54:43.780 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-09T17:54:43.782 INFO:teuthology.task.internal:Not uploading archives.
2026-03-09T17:54:43.782 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-09T17:54:43.785 INFO:teuthology.task.internal:Tidying up after the test...
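Archive collection streams `sudo tar c -f - -C <dir> -- .` from each node and unpacks it under the job's archive directory, as the two "Transferring archived files" pairs above show. A hedged sketch of that stream-and-extract shape; the plain ssh invocation and the function name are assumptions for illustration (teuthology drives the remote command over its own connection layer):

    import subprocess
    import tarfile
    from pathlib import Path

    def pull_remote_archive(host, remote_dir, local_dir):
        # Stream a tarball of remote_dir over ssh and extract it locally.
        Path(local_dir).mkdir(parents=True, exist_ok=True)
        proc = subprocess.Popen(
            ["ssh", host, "sudo", "tar", "c", "-f", "-", "-C", remote_dir, "--", "."],
            stdout=subprocess.PIPE)
        with tarfile.open(fileobj=proc.stdout, mode="r|") as tar:
            tar.extractall(local_dir)
        proc.wait()

    # e.g. pull_remote_archive("vm06", "/home/ubuntu/cephtest/archive",
    #          "/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/592/remote/vm06")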
2026-03-09T17:54:43.785 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T17:54:43.795 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T17:54:43.809 INFO:teuthology.orchestra.run.vm06.stdout: 8532139 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 17:54 /home/ubuntu/cephtest
2026-03-09T17:54:43.836 INFO:teuthology.orchestra.run.vm09.stdout: 8532144 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 17:54 /home/ubuntu/cephtest
2026-03-09T17:54:43.837 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-09T17:54:43.843 INFO:teuthology.run:Summary data:
description: orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress 3-final}
duration: 454.05448150634766
failure_reason: '"2026-03-09T17:51:58.948501+0000 mon.vm06 (mon.0) 492 : cluster [WRN] Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL)" in cluster log'
owner: kyr
sentry_event: null
status: fail
success: false
2026-03-09T17:54:43.843 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T17:54:43.861 INFO:teuthology.run:FAIL
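The summary marks the run failed because the cluster log contained CEPHADM_APPLY_SPEC_FAIL, which is not covered by the job's log-ignorelist (only CEPHADM_DAEMON_PLACE_FAIL and CEPHADM_FAILED_DAEMON are tolerated). A rough sketch of that kind of ignorelist-aware scan is below; it is illustrative only and not the actual teuthology/ceph check:

    import re

    # A subset of the job's log-ignorelist, purely for illustration.
    IGNORELIST = [re.compile(p) for p in (
        r"CEPHADM_DAEMON_PLACE_FAIL",
        r"CEPHADM_FAILED_DAEMON",
    )]

    def cluster_log_failure(lines):
        # Return the first warning/error line not matched by the ignorelist;
        # any such line is enough to flip the summary to status: fail.
        for line in lines:
            if ("[WRN]" in line or "[ERR]" in line) and \
                    not any(p.search(line) for p in IGNORELIST):
                return '"%s" in cluster log' % line.strip()
        return None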