2026-03-05T23:38:50.961 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-05T23:38:50.966 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-05T23:38:50.984 INFO:teuthology.run:Config:
archive_path: /archive/irq0-2026-03-05_18:18:00-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/100
branch: cobaltcore-storage-v19.2.3-fasttrack-3
description: orch:cephadm:osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
email: null
first_in_suite: false
flavor: default
job_id: '100'
last_in_suite: false
machine_type: vps
name: irq0-2026-03-05_18:18:00-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: cobaltcore-storage-v19.2.3-fasttrack-3
  ansible.cephlab:
    branch: main
    repo: https://github.com/kshtsk/ceph-cm-ansible.git
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: Europe/Berlin
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - OSD_DOWN
    - CEPHADM_FAILED_DAEMON
    - but is still running
    - PG_DEGRADED
    log-only-match:
    - CEPHADM_
    sha1: c24117fd5525679b799527bc1bd1f1dd0a2db5e2
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  cephadm:
    cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
    containers:
      image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3
  install:
    ceph:
      flavor: default
      sha1: c24117fd5525679b799527bc1bd1f1dd0a2db5e2
    extra_system_packages:
      deb:
      - python3-xmltodict
      - s3cmd
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - s3cmd
    repos:
    - name: ceph-source
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-47-gc24117fd552/el9.clyso/SRPMS
    - name: ceph-noarch
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-47-gc24117fd552/el9.clyso/noarch
    - name: ceph
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-47-gc24117fd552/el9.clyso/x86_64
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
    - scontext=system_u:system_r:getty_t:s0
  workunit:
    branch: tt-19.2.3-fasttrack-3
    sha1: e50baef5944c0b5e8e734db1c467f1f19415a932
owner: irq0
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 7195
sha1: c24117fd5525679b799527bc1bd1f1dd0a2db5e2
sleep_before_teardown: 0
suite: orch:cephadm:osds
suite_branch: tt-19.2.3-fasttrack-3
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_e50baef5944c0b5e8e734db1c467f1f19415a932/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: e50baef5944c0b5e8e734db1c467f1f19415a932
targets:
  vm02.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFF83d7VV6ISras4yIbC0lbp2+tQ76MYs1fOfEh18toodOabX1IcQ7APHnFAFiIkDyKt2h48TAF5rmB4KqENY6w=
  vm09.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLMIEILkT+UyKTdmmvON8FU9C/bnK0KknVOFOxDBkBB+gf+EeN6e82y+NGliB0SE66ONQ3qH9ztcdJYF5teE9Y4=
tasks:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install nvmetcli nvme-cli -y
- nvme_loop: null
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
- cephadm.shell:
    host.a:
    - 'set -e
      set -x
      ceph orch ps
      HOST=$(hostname -s)
      OSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk ''{print $1}'')
      echo "host $HOST, osd $OSD"
      ceph orch daemon stop $OSD
      while ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
      ceph auth export $OSD > k
      ceph orch daemon rm $OSD --force
      ceph orch ps --refresh
      while ceph orch ps | grep $OSD ; do sleep 5 ; done
      ceph auth add $OSD -i k
      ceph cephadm osd activate $HOST
      while ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
      '
- cephadm.healthy: null
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-05_18:18:00
tube: vps
user: irq0
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.43333
2026-03-05T23:38:50.984 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_e50baef5944c0b5e8e734db1c467f1f19415a932/qa; will attempt to use it
2026-03-05T23:38:50.985 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_e50baef5944c0b5e8e734db1c467f1f19415a932/qa/tasks
2026-03-05T23:38:50.985 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-05T23:38:50.985 INFO:teuthology.task.internal:Saving configuration
2026-03-05T23:38:50.989 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-05T23:38:50.990 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-05T23:38:50.997 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm02.local', 'description': '/archive/irq0-2026-03-05_18:18:00-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/100', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-05 22:37:25.289000', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:02', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFF83d7VV6ISras4yIbC0lbp2+tQ76MYs1fOfEh18toodOabX1IcQ7APHnFAFiIkDyKt2h48TAF5rmB4KqENY6w='}
2026-03-05T23:38:51.002 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm09.local', 'description': '/archive/irq0-2026-03-05_18:18:00-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/100', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-05 22:37:25.288587', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:09', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLMIEILkT+UyKTdmmvON8FU9C/bnK0KknVOFOxDBkBB+gf+EeN6e82y+NGliB0SE66ONQ3qH9ztcdJYF5teE9Y4='}
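(Editor's note: the second cephadm.shell task in the Config dump above is the core of this rmdir-reactivate job. The following is a commented restatement of that exact script for readability; it is not an additional step executed by the run.)

set -e
set -x
ceph orch ps
HOST=$(hostname -s)
# pick the first OSD daemon scheduled on this host
OSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk '{print $1}')
echo "host $HOST, osd $OSD"
# stop the daemon and wait until the orchestrator no longer reports it as running
ceph orch daemon stop $OSD
while ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
# save the daemon's cephx key, then remove the daemon from the orchestrator
# (the OSD's on-disk data stays in place, which is what the later activate relies on)
ceph auth export $OSD > k
ceph orch daemon rm $OSD --force
ceph orch ps --refresh
while ceph orch ps | grep $OSD ; do sleep 5 ; done
# restore the key and ask cephadm to re-activate any existing OSDs it finds on the host,
# then wait for the daemon to come back up
ceph auth add $OSD -i k
ceph cephadm osd activate $HOST
while ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done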
2026-03-05T23:38:51.002 INFO:teuthology.run_tasks:Running task internal.add_remotes... 2026-03-05T23:38:51.003 INFO:teuthology.task.internal:roles: ubuntu@vm02.local - ['host.a', 'client.0'] 2026-03-05T23:38:51.003 INFO:teuthology.task.internal:roles: ubuntu@vm09.local - ['host.b', 'client.1'] 2026-03-05T23:38:51.003 INFO:teuthology.run_tasks:Running task console_log... 2026-03-05T23:38:51.008 DEBUG:teuthology.task.console_log:vm02 does not support IPMI; excluding 2026-03-05T23:38:51.014 DEBUG:teuthology.task.console_log:vm09 does not support IPMI; excluding 2026-03-05T23:38:51.014 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7f89117a7d00>, signals=[15]) 2026-03-05T23:38:51.014 INFO:teuthology.run_tasks:Running task internal.connect... 2026-03-05T23:38:51.014 INFO:teuthology.task.internal:Opening connections... 2026-03-05T23:38:51.014 DEBUG:teuthology.task.internal:connecting to ubuntu@vm02.local 2026-03-05T23:38:51.015 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm02.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-05T23:38:51.073 DEBUG:teuthology.task.internal:connecting to ubuntu@vm09.local 2026-03-05T23:38:51.074 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-05T23:38:51.133 INFO:teuthology.run_tasks:Running task internal.push_inventory... 2026-03-05T23:38:51.134 DEBUG:teuthology.orchestra.run.vm02:> uname -m 2026-03-05T23:38:51.182 INFO:teuthology.orchestra.run.vm02.stdout:x86_64 2026-03-05T23:38:51.182 DEBUG:teuthology.orchestra.run.vm02:> cat /etc/os-release 2026-03-05T23:38:51.237 INFO:teuthology.orchestra.run.vm02.stdout:NAME="CentOS Stream" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:VERSION="9" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:ID="centos" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:ID_LIKE="rhel fedora" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:VERSION_ID="9" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:PLATFORM_ID="platform:el9" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:ANSI_COLOR="0;31" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:LOGO="fedora-logo-icon" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:HOME_URL="https://centos.org/" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-05T23:38:51.238 INFO:teuthology.orchestra.run.vm02.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-05T23:38:51.238 INFO:teuthology.lock.ops:Updating vm02.local on lock server 2026-03-05T23:38:51.243 DEBUG:teuthology.orchestra.run.vm09:> uname -m 2026-03-05T23:38:51.259 INFO:teuthology.orchestra.run.vm09.stdout:x86_64 2026-03-05T23:38:51.259 DEBUG:teuthology.orchestra.run.vm09:> cat /etc/os-release 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:NAME="CentOS Stream" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:VERSION="9" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:ID="centos" 2026-03-05T23:38:51.314 
INFO:teuthology.orchestra.run.vm09.stdout:ID_LIKE="rhel fedora" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_ID="9" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:PLATFORM_ID="platform:el9" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:ANSI_COLOR="0;31" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:LOGO="fedora-logo-icon" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:HOME_URL="https://centos.org/" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-05T23:38:51.314 INFO:teuthology.orchestra.run.vm09.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-05T23:38:51.315 INFO:teuthology.lock.ops:Updating vm09.local on lock server 2026-03-05T23:38:51.319 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles... 2026-03-05T23:38:51.322 INFO:teuthology.run_tasks:Running task internal.check_conflict... 2026-03-05T23:38:51.323 INFO:teuthology.task.internal:Checking for old test directory... 2026-03-05T23:38:51.323 DEBUG:teuthology.orchestra.run.vm02:> test '!' -e /home/ubuntu/cephtest 2026-03-05T23:38:51.325 DEBUG:teuthology.orchestra.run.vm09:> test '!' -e /home/ubuntu/cephtest 2026-03-05T23:38:51.369 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 2026-03-05T23:38:51.370 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 2026-03-05T23:38:51.370 DEBUG:teuthology.orchestra.run.vm02:> test -z $(ls -A /var/lib/ceph) 2026-03-05T23:38:51.379 DEBUG:teuthology.orchestra.run.vm09:> test -z $(ls -A /var/lib/ceph) 2026-03-05T23:38:51.395 INFO:teuthology.orchestra.run.vm02.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-05T23:38:51.426 INFO:teuthology.orchestra.run.vm09.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-05T23:38:51.426 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-03-05T23:38:51.434 DEBUG:teuthology.orchestra.run.vm02:> test -e /ceph-qa-ready 2026-03-05T23:38:51.450 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-05T23:38:51.633 DEBUG:teuthology.orchestra.run.vm09:> test -e /ceph-qa-ready 2026-03-05T23:38:51.648 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-05T23:38:51.832 INFO:teuthology.run_tasks:Running task internal.base... 2026-03-05T23:38:51.833 INFO:teuthology.task.internal:Creating test directory... 2026-03-05T23:38:51.833 DEBUG:teuthology.orchestra.run.vm02:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-05T23:38:51.836 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-05T23:38:51.853 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-03-05T23:38:51.854 INFO:teuthology.run_tasks:Running task internal.archive... 2026-03-05T23:38:51.855 INFO:teuthology.task.internal:Creating archive directory... 
2026-03-05T23:38:51.855 DEBUG:teuthology.orchestra.run.vm02:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-05T23:38:51.895 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-05T23:38:51.916 INFO:teuthology.run_tasks:Running task internal.coredump... 2026-03-05T23:38:51.917 INFO:teuthology.task.internal:Enabling coredump saving... 2026-03-05T23:38:51.917 DEBUG:teuthology.orchestra.run.vm02:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-05T23:38:51.966 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-05T23:38:51.966 DEBUG:teuthology.orchestra.run.vm09:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-05T23:38:51.983 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-05T23:38:51.983 DEBUG:teuthology.orchestra.run.vm02:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-05T23:38:52.008 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-05T23:38:52.032 INFO:teuthology.orchestra.run.vm02.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-05T23:38:52.041 INFO:teuthology.orchestra.run.vm02.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-05T23:38:52.051 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-05T23:38:52.060 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-05T23:38:52.061 INFO:teuthology.run_tasks:Running task internal.sudo... 2026-03-05T23:38:52.062 INFO:teuthology.task.internal:Configuring sudo... 2026-03-05T23:38:52.062 DEBUG:teuthology.orchestra.run.vm02:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-05T23:38:52.084 DEBUG:teuthology.orchestra.run.vm09:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-05T23:38:52.126 INFO:teuthology.run_tasks:Running task internal.syslog... 2026-03-05T23:38:52.128 INFO:teuthology.task.internal.syslog:Starting syslog monitoring... 
2026-03-05T23:38:52.128 DEBUG:teuthology.orchestra.run.vm02:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-05T23:38:52.150 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-05T23:38:52.181 DEBUG:teuthology.orchestra.run.vm02:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-05T23:38:52.227 DEBUG:teuthology.orchestra.run.vm02:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-05T23:38:52.281 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-05T23:38:52.282 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-05T23:38:52.342 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-05T23:38:52.363 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-05T23:38:52.420 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-05T23:38:52.420 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-05T23:38:52.478 DEBUG:teuthology.orchestra.run.vm02:> sudo service rsyslog restart 2026-03-05T23:38:52.480 DEBUG:teuthology.orchestra.run.vm09:> sudo service rsyslog restart 2026-03-05T23:38:52.509 INFO:teuthology.orchestra.run.vm02.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-05T23:38:52.545 INFO:teuthology.orchestra.run.vm09.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-05T23:38:52.965 INFO:teuthology.run_tasks:Running task internal.timer... 2026-03-05T23:38:52.967 INFO:teuthology.task.internal:Starting timer... 2026-03-05T23:38:52.967 INFO:teuthology.run_tasks:Running task pcp... 2026-03-05T23:38:52.969 INFO:teuthology.run_tasks:Running task selinux... 2026-03-05T23:38:52.972 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']} 2026-03-05T23:38:52.972 INFO:teuthology.task.selinux:Excluding vm02: VMs are not yet supported 2026-03-05T23:38:52.972 INFO:teuthology.task.selinux:Excluding vm09: VMs are not yet supported 2026-03-05T23:38:52.972 DEBUG:teuthology.task.selinux:Getting current SELinux state 2026-03-05T23:38:52.972 DEBUG:teuthology.task.selinux:Existing SELinux modes: {} 2026-03-05T23:38:52.972 INFO:teuthology.task.selinux:Putting SELinux into permissive mode 2026-03-05T23:38:52.972 INFO:teuthology.run_tasks:Running task ansible.cephlab... 
2026-03-05T23:38:52.974 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'Europe/Berlin'}} 2026-03-05T23:38:52.975 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git 2026-03-05T23:38:52.976 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin 2026-03-05T23:38:53.455 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main 2026-03-05T23:38:53.461 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}] 2026-03-05T23:38:53.461 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "Europe/Berlin"}' -i /tmp/teuth_ansible_inventoryul3b_98a --limit vm02.local,vm09.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs 2026-03-05T23:44:22.995 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm02.local'), Remote(name='ubuntu@vm09.local')] 2026-03-05T23:44:22.996 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm02.local' 2026-03-05T23:44:22.996 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm02.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-05T23:44:23.057 DEBUG:teuthology.orchestra.run.vm02:> true 2026-03-05T23:44:23.132 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm02.local' 2026-03-05T23:44:23.132 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm09.local' 2026-03-05T23:44:23.133 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-05T23:44:23.199 DEBUG:teuthology.orchestra.run.vm09:> true 2026-03-05T23:44:23.277 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm09.local' 2026-03-05T23:44:23.277 INFO:teuthology.run_tasks:Running task clock... 2026-03-05T23:44:23.280 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew... 
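(Editor's note: the clock task issues one long fallback command chain per node, logged verbatim in the DEBUG lines below. On CentOS Stream 9 only chronyd is present, so the "Unit ntp.service not loaded/found" and "ntpd: command not found" messages that follow are expected, not failures. Commented sketch of the same chain:)

# stop whichever time daemon exists (only the chronyd branch matters on EL9)
sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service
# step the clock; chronyc makestep prints "506 Cannot talk to daemon" here because chronyd was just stopped
sudo ntpd -gq || sudo chronyc makestep
# restart the daemon
sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service
# report peers/sources for the skew check; the trailing "|| true" keeps the task from failing on missing tools
PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true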
2026-03-05T23:44:23.280 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-05T23:44:23.280 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-05T23:44:23.281 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-05T23:44:23.282 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-05T23:44:23.329 INFO:teuthology.orchestra.run.vm02.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-05T23:44:23.351 INFO:teuthology.orchestra.run.vm02.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-05T23:44:23.353 INFO:teuthology.orchestra.run.vm09.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-05T23:44:23.373 INFO:teuthology.orchestra.run.vm09.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-05T23:44:23.383 INFO:teuthology.orchestra.run.vm02.stderr:sudo: ntpd: command not found 2026-03-05T23:44:23.401 INFO:teuthology.orchestra.run.vm02.stdout:506 Cannot talk to daemon 2026-03-05T23:44:23.402 INFO:teuthology.orchestra.run.vm09.stderr:sudo: ntpd: command not found 2026-03-05T23:44:23.415 INFO:teuthology.orchestra.run.vm09.stdout:506 Cannot talk to daemon 2026-03-05T23:44:23.425 INFO:teuthology.orchestra.run.vm02.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-05T23:44:23.435 INFO:teuthology.orchestra.run.vm09.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-05T23:44:23.445 INFO:teuthology.orchestra.run.vm02.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-05T23:44:23.453 INFO:teuthology.orchestra.run.vm09.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-05T23:44:23.499 INFO:teuthology.orchestra.run.vm02.stderr:bash: line 1: ntpq: command not found 2026-03-05T23:44:23.501 INFO:teuthology.orchestra.run.vm02.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-05T23:44:23.501 INFO:teuthology.orchestra.run.vm02.stdout:=============================================================================== 2026-03-05T23:44:23.512 INFO:teuthology.orchestra.run.vm09.stderr:bash: line 1: ntpq: command not found 2026-03-05T23:44:23.514 INFO:teuthology.orchestra.run.vm09.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-05T23:44:23.514 INFO:teuthology.orchestra.run.vm09.stdout:=============================================================================== 2026-03-05T23:44:23.515 INFO:teuthology.run_tasks:Running task pexec... 2026-03-05T23:44:23.518 INFO:teuthology.task.pexec:Executing custom commands... 
2026-03-05T23:44:23.518 DEBUG:teuthology.orchestra.run.vm02:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-05T23:44:23.518 DEBUG:teuthology.orchestra.run.vm09:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-05T23:44:23.521 DEBUG:teuthology.task.pexec:ubuntu@vm02.local< sudo dnf remove nvme-cli -y 2026-03-05T23:44:23.521 DEBUG:teuthology.task.pexec:ubuntu@vm02.local< sudo dnf install nvmetcli nvme-cli -y 2026-03-05T23:44:23.521 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm02.local 2026-03-05T23:44:23.521 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-05T23:44:23.521 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y 2026-03-05T23:44:23.557 DEBUG:teuthology.task.pexec:ubuntu@vm09.local< sudo dnf remove nvme-cli -y 2026-03-05T23:44:23.557 DEBUG:teuthology.task.pexec:ubuntu@vm09.local< sudo dnf install nvmetcli nvme-cli -y 2026-03-05T23:44:23.557 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm09.local 2026-03-05T23:44:23.557 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-05T23:44:23.557 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y 2026-03-05T23:44:23.749 INFO:teuthology.orchestra.run.vm09.stdout:No match for argument: nvme-cli 2026-03-05T23:44:23.749 INFO:teuthology.orchestra.run.vm09.stderr:No packages marked for removal. 2026-03-05T23:44:23.753 INFO:teuthology.orchestra.run.vm09.stdout:Dependencies resolved. 2026-03-05T23:44:23.753 INFO:teuthology.orchestra.run.vm09.stdout:Nothing to do. 2026-03-05T23:44:23.753 INFO:teuthology.orchestra.run.vm09.stdout:Complete! 2026-03-05T23:44:23.775 INFO:teuthology.orchestra.run.vm02.stdout:No match for argument: nvme-cli 2026-03-05T23:44:23.775 INFO:teuthology.orchestra.run.vm02.stderr:No packages marked for removal. 2026-03-05T23:44:23.778 INFO:teuthology.orchestra.run.vm02.stdout:Dependencies resolved. 2026-03-05T23:44:23.779 INFO:teuthology.orchestra.run.vm02.stdout:Nothing to do. 2026-03-05T23:44:23.779 INFO:teuthology.orchestra.run.vm02.stdout:Complete! 2026-03-05T23:44:24.175 INFO:teuthology.orchestra.run.vm09.stdout:Last metadata expiration check: 0:04:59 ago on Thu 05 Mar 2026 11:39:25 PM CET. 2026-03-05T23:44:24.255 INFO:teuthology.orchestra.run.vm02.stdout:Last metadata expiration check: 0:01:08 ago on Thu 05 Mar 2026 11:43:16 PM CET. 2026-03-05T23:44:24.282 INFO:teuthology.orchestra.run.vm09.stdout:Dependencies resolved. 
2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================ 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout: Package Architecture Version Repository Size 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================ 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout:Installing: 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout:Installing dependencies: 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout:Transaction Summary 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================ 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout:Install 6 Packages 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout:Total download size: 2.3 M 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout:Installed size: 11 M 2026-03-05T23:44:24.283 INFO:teuthology.orchestra.run.vm09.stdout:Downloading Packages: 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout:Dependencies resolved. 
2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout:================================================================================ 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout: Package Architecture Version Repository Size 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout:================================================================================ 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout:Installing: 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout:Installing dependencies: 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout:Transaction Summary 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout:================================================================================ 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout:Install 6 Packages 2026-03-05T23:44:24.392 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:44:24.393 INFO:teuthology.orchestra.run.vm02.stdout:Total download size: 2.3 M 2026-03-05T23:44:24.393 INFO:teuthology.orchestra.run.vm02.stdout:Installed size: 11 M 2026-03-05T23:44:24.393 INFO:teuthology.orchestra.run.vm02.stdout:Downloading Packages: 2026-03-05T23:44:24.919 INFO:teuthology.orchestra.run.vm09.stdout:(1/6): python3-configshell-1.1.30-1.el9.noarch. 451 kB/s | 72 kB 00:00 2026-03-05T23:44:24.944 INFO:teuthology.orchestra.run.vm09.stdout:(2/6): nvmetcli-0.8-3.el9.noarch.rpm 238 kB/s | 44 kB 00:00 2026-03-05T23:44:24.962 INFO:teuthology.orchestra.run.vm09.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 1.9 MB/s | 84 kB 00:00 2026-03-05T23:44:25.008 INFO:teuthology.orchestra.run.vm09.stdout:(4/6): nvme-cli-2.16-1.el9.x86_64.rpm 4.7 MB/s | 1.2 MB 00:00 2026-03-05T23:44:25.013 INFO:teuthology.orchestra.run.vm09.stdout:(5/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.1 MB/s | 150 kB 00:00 2026-03-05T23:44:25.065 INFO:teuthology.orchestra.run.vm09.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 8.0 MB/s | 837 kB 00:00 2026-03-05T23:44:25.065 INFO:teuthology.orchestra.run.vm09.stdout:-------------------------------------------------------------------------------- 2026-03-05T23:44:25.065 INFO:teuthology.orchestra.run.vm09.stdout:Total 3.0 MB/s | 2.3 MB 00:00 2026-03-05T23:44:25.135 INFO:teuthology.orchestra.run.vm02.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 210 kB/s | 44 kB 00:00 2026-03-05T23:44:25.136 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction check 2026-03-05T23:44:25.145 INFO:teuthology.orchestra.run.vm09.stdout:Transaction check succeeded. 2026-03-05T23:44:25.146 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction test 2026-03-05T23:44:25.161 INFO:teuthology.orchestra.run.vm02.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 
307 kB/s | 72 kB 00:00 2026-03-05T23:44:25.198 INFO:teuthology.orchestra.run.vm02.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 1.3 MB/s | 84 kB 00:00 2026-03-05T23:44:25.204 INFO:teuthology.orchestra.run.vm09.stdout:Transaction test succeeded. 2026-03-05T23:44:25.204 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction 2026-03-05T23:44:25.235 INFO:teuthology.orchestra.run.vm02.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.0 MB/s | 150 kB 00:00 2026-03-05T23:44:25.240 INFO:teuthology.orchestra.run.vm02.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 3.7 MB/s | 1.2 MB 00:00 2026-03-05T23:44:25.345 INFO:teuthology.orchestra.run.vm02.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 5.6 MB/s | 837 kB 00:00 2026-03-05T23:44:25.346 INFO:teuthology.orchestra.run.vm02.stdout:-------------------------------------------------------------------------------- 2026-03-05T23:44:25.346 INFO:teuthology.orchestra.run.vm02.stdout:Total 2.4 MB/s | 2.3 MB 00:00 2026-03-05T23:44:25.363 INFO:teuthology.orchestra.run.vm09.stdout: Preparing : 1/1 2026-03-05T23:44:25.375 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6 2026-03-05T23:44:25.387 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6 2026-03-05T23:44:25.394 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-05T23:44:25.402 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-05T23:44:25.404 INFO:teuthology.orchestra.run.vm09.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6 2026-03-05T23:44:25.421 INFO:teuthology.orchestra.run.vm02.stdout:Running transaction check 2026-03-05T23:44:25.433 INFO:teuthology.orchestra.run.vm02.stdout:Transaction check succeeded. 2026-03-05T23:44:25.433 INFO:teuthology.orchestra.run.vm02.stdout:Running transaction test 2026-03-05T23:44:25.500 INFO:teuthology.orchestra.run.vm02.stdout:Transaction test succeeded. 2026-03-05T23:44:25.501 INFO:teuthology.orchestra.run.vm02.stdout:Running transaction 2026-03-05T23:44:25.567 INFO:teuthology.orchestra.run.vm09.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6 2026-03-05T23:44:25.571 INFO:teuthology.orchestra.run.vm09.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-05T23:44:25.703 INFO:teuthology.orchestra.run.vm02.stdout: Preparing : 1/1 2026-03-05T23:44:25.718 INFO:teuthology.orchestra.run.vm02.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6 2026-03-05T23:44:25.736 INFO:teuthology.orchestra.run.vm02.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6 2026-03-05T23:44:25.743 INFO:teuthology.orchestra.run.vm02.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-05T23:44:25.755 INFO:teuthology.orchestra.run.vm02.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-05T23:44:25.756 INFO:teuthology.orchestra.run.vm02.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6 2026-03-05T23:44:25.974 INFO:teuthology.orchestra.run.vm02.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6 2026-03-05T23:44:25.976 INFO:teuthology.orchestra.run.vm09.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-05T23:44:25.976 INFO:teuthology.orchestra.run.vm09.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 
2026-03-05T23:44:25.976 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:44:25.981 INFO:teuthology.orchestra.run.vm02.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-05T23:44:26.448 INFO:teuthology.orchestra.run.vm02.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-05T23:44:26.448 INFO:teuthology.orchestra.run.vm02.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 2026-03-05T23:44:26.448 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:44:26.519 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6 2026-03-05T23:44:26.520 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6 2026-03-05T23:44:26.520 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-05T23:44:26.520 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-05T23:44:26.520 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6 2026-03-05T23:44:26.616 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6 2026-03-05T23:44:26.616 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:44:26.616 INFO:teuthology.orchestra.run.vm09.stdout:Installed: 2026-03-05T23:44:26.617 INFO:teuthology.orchestra.run.vm09.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-05T23:44:26.617 INFO:teuthology.orchestra.run.vm09.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-05T23:44:26.617 INFO:teuthology.orchestra.run.vm09.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-05T23:44:26.617 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:44:26.617 INFO:teuthology.orchestra.run.vm09.stdout:Complete! 2026-03-05T23:44:26.713 DEBUG:teuthology.parallel:result is None 2026-03-05T23:44:26.998 INFO:teuthology.orchestra.run.vm02.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6 2026-03-05T23:44:26.999 INFO:teuthology.orchestra.run.vm02.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6 2026-03-05T23:44:26.999 INFO:teuthology.orchestra.run.vm02.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-05T23:44:26.999 INFO:teuthology.orchestra.run.vm02.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-05T23:44:26.999 INFO:teuthology.orchestra.run.vm02.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6 2026-03-05T23:44:27.080 INFO:teuthology.orchestra.run.vm02.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6 2026-03-05T23:44:27.080 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:44:27.080 INFO:teuthology.orchestra.run.vm02.stdout:Installed: 2026-03-05T23:44:27.080 INFO:teuthology.orchestra.run.vm02.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-05T23:44:27.080 INFO:teuthology.orchestra.run.vm02.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-05T23:44:27.080 INFO:teuthology.orchestra.run.vm02.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-05T23:44:27.080 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:44:27.080 INFO:teuthology.orchestra.run.vm02.stdout:Complete! 2026-03-05T23:44:27.147 DEBUG:teuthology.parallel:result is None 2026-03-05T23:44:27.147 INFO:teuthology.run_tasks:Running task nvme_loop... 
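(Editor's note: the nvme_loop task that starts here exports each scratch disk (/dev/vdb through /dev/vde on vm02) back to the host as an NVMe namespace over the kernel nvmet loop transport; the per-device commands are logged verbatim below. Condensed, commented sketch for the first device, vdb:)

# one-time setup: load the loop transport, register a host NQN, create port 1 using "loop"
grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop
sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn
sudo mkdir -p /sys/kernel/config/nvmet/ports/1
echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype
# per device: one subsystem with the block device as namespace 1, exposed through port 1
sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdb
echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/attr_allow_any_host
sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1
echo -n /dev/vdb | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1/device_path
echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1/enable
sudo ln -s /sys/kernel/config/nvmet/subsystems/vdb /sys/kernel/config/nvmet/ports/1/subsystems/vdb
# connect the local initiator to the exported subsystem; this creates the /dev/nvmeXn1 node
# (nvme0n1 for the first device in this run, as the lsblk output below confirms)
sudo nvme connect -t loop -n vdb -q hostnqn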
2026-03-05T23:44:27.150 INFO:tasks.nvme_loop:Setting up nvme_loop on scratch devices... 2026-03-05T23:44:27.151 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-05T23:44:27.151 DEBUG:teuthology.orchestra.run.vm02:> dd if=/scratch_devs of=/dev/stdout 2026-03-05T23:44:27.180 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-05T23:44:27.180 DEBUG:teuthology.orchestra.run.vm02:> ls /dev/[sv]d? 2026-03-05T23:44:27.247 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vda 2026-03-05T23:44:27.247 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdb 2026-03-05T23:44:27.247 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdc 2026-03-05T23:44:27.248 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdd 2026-03-05T23:44:27.248 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vde 2026-03-05T23:44:27.248 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-05T23:44:27.248 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-05T23:44:27.248 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vdb 2026-03-05T23:44:27.315 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vdb 2026-03-05T23:44:27.315 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:44:27.315 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 221 Links: 1 Device type: fc,10 2026-03-05T23:44:27.315 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:44:27.315 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:44:27.315 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-05 23:44:26.510921278 +0100 2026-03-05T23:44:27.315 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-05 23:44:26.510921278 +0100 2026-03-05T23:44:27.315 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-05 23:44:26.510921278 +0100 2026-03-05T23:44:27.315 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-05 23:38:20.248000000 +0100 2026-03-05T23:44:27.315 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-05T23:44:27.407 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-05T23:44:27.407 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-05T23:44:27.407 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000397415 s, 1.3 MB/s 2026-03-05T23:44:27.408 DEBUG:teuthology.orchestra.run.vm02:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-05T23:44:27.439 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vdc 2026-03-05T23:44:27.515 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vdc 2026-03-05T23:44:27.515 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:44:27.515 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 224 Links: 1 Device type: fc,20 2026-03-05T23:44:27.515 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:44:27.515 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:44:27.515 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-05 23:44:26.547921291 +0100 2026-03-05T23:44:27.515 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-05 23:44:26.547921291 +0100 2026-03-05T23:44:27.515 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-05 23:44:26.547921291 +0100 2026-03-05T23:44:27.515 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-05 23:38:20.252000000 +0100 2026-03-05T23:44:27.515 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-05T23:44:27.609 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-05T23:44:27.609 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-05T23:44:27.609 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.00018718 s, 2.7 MB/s 2026-03-05T23:44:27.610 DEBUG:teuthology.orchestra.run.vm02:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-05T23:44:27.632 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vdd 2026-03-05T23:44:27.701 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vdd 2026-03-05T23:44:27.701 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:44:27.701 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 225 Links: 1 Device type: fc,30 2026-03-05T23:44:27.701 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:44:27.701 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:44:27.701 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-05 23:44:26.515921280 +0100 2026-03-05T23:44:27.701 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-05 23:44:26.515921280 +0100 2026-03-05T23:44:27.701 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-05 23:44:26.515921280 +0100 2026-03-05T23:44:27.701 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-05 23:38:20.258000000 +0100 2026-03-05T23:44:27.702 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-05T23:44:27.776 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-05T23:44:27.776 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-05T23:44:27.776 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000151744 s, 3.4 MB/s 2026-03-05T23:44:27.777 DEBUG:teuthology.orchestra.run.vm02:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-05T23:44:27.835 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vde 2026-03-05T23:44:27.898 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vde 2026-03-05T23:44:27.898 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:44:27.898 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 251 Links: 1 Device type: fc,40 2026-03-05T23:44:27.898 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:44:27.898 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:44:27.898 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-05 23:44:26.513921279 +0100 2026-03-05T23:44:27.898 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-05 23:44:26.513921279 +0100 2026-03-05T23:44:27.898 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-05 23:44:26.513921279 +0100 2026-03-05T23:44:27.898 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-05 23:38:20.267000000 +0100 2026-03-05T23:44:27.898 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-05T23:44:27.977 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-05T23:44:27.977 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-05T23:44:27.977 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000185007 s, 2.8 MB/s 2026-03-05T23:44:27.978 DEBUG:teuthology.orchestra.run.vm02:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-05T23:44:28.043 DEBUG:teuthology.orchestra.run.vm02:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype 2026-03-05T23:44:28.211 INFO:teuthology.orchestra.run.vm02.stdout:loop 2026-03-05T23:44:28.212 INFO:tasks.nvme_loop:Connecting nvme_loop vm02:/dev/vdb... 2026-03-05T23:44:28.212 DEBUG:teuthology.orchestra.run.vm02:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdb && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1 && echo -n /dev/vdb | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdb /sys/kernel/config/nvmet/ports/1/subsystems/vdb && sudo nvme connect -t loop -n vdb -q hostnqn 2026-03-05T23:44:28.263 INFO:teuthology.orchestra.run.vm02.stdout:1 2026-03-05T23:44:28.301 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdb1 2026-03-05T23:44:28.333 INFO:teuthology.orchestra.run.vm02.stdout:connecting to device: nvme0 2026-03-05T23:44:28.335 INFO:tasks.nvme_loop:Connecting nvme_loop vm02:/dev/vdc... 
2026-03-05T23:44:28.335 DEBUG:teuthology.orchestra.run.vm02:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdc && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1 && echo -n /dev/vdc | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdc /sys/kernel/config/nvmet/ports/1/subsystems/vdc && sudo nvme connect -t loop -n vdc -q hostnqn 2026-03-05T23:44:28.384 INFO:teuthology.orchestra.run.vm02.stdout:1 2026-03-05T23:44:28.419 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdc1 2026-03-05T23:44:28.450 INFO:teuthology.orchestra.run.vm02.stdout:connecting to device: nvme1 2026-03-05T23:44:28.454 INFO:tasks.nvme_loop:Connecting nvme_loop vm02:/dev/vdd... 2026-03-05T23:44:28.454 DEBUG:teuthology.orchestra.run.vm02:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdd && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1 && echo -n /dev/vdd | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdd /sys/kernel/config/nvmet/ports/1/subsystems/vdd && sudo nvme connect -t loop -n vdd -q hostnqn 2026-03-05T23:44:28.503 INFO:teuthology.orchestra.run.vm02.stdout:1 2026-03-05T23:44:28.546 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdd1 2026-03-05T23:44:28.585 INFO:teuthology.orchestra.run.vm02.stdout:connecting to device: nvme2 2026-03-05T23:44:28.590 INFO:tasks.nvme_loop:Connecting nvme_loop vm02:/dev/vde... 
2026-03-05T23:44:28.590 DEBUG:teuthology.orchestra.run.vm02:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vde && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vde/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vde/namespaces/1 && echo -n /dev/vde | sudo tee /sys/kernel/config/nvmet/subsystems/vde/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vde/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vde /sys/kernel/config/nvmet/ports/1/subsystems/vde && sudo nvme connect -t loop -n vde -q hostnqn 2026-03-05T23:44:28.640 INFO:teuthology.orchestra.run.vm02.stdout:1 2026-03-05T23:44:28.675 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vde1 2026-03-05T23:44:28.710 INFO:teuthology.orchestra.run.vm02.stdout:connecting to device: nvme3 2026-03-05T23:44:28.711 DEBUG:teuthology.orchestra.run.vm02:> lsblk 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:sr0 11:0 1 366K 0 rom 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:vda 252:0 0 40G 0 disk 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:└─vda1 252:1 0 40G 0 part / 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:vdb 252:16 0 20G 0 disk 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:vdc 252:32 0 20G 0 disk 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:vdd 252:48 0 20G 0 disk 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:vde 252:64 0 20G 0 disk 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:nvme0n1 259:1 0 20G 0 disk 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:nvme1n1 259:3 0 20G 0 disk 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:nvme2n1 259:5 0 20G 0 disk 2026-03-05T23:44:28.745 INFO:teuthology.orchestra.run.vm02.stdout:nvme3n1 259:7 0 20G 0 disk 2026-03-05T23:44:28.746 DEBUG:teuthology.orchestra.run.vm02:> sudo nvme list -o json 2026-03-05T23:44:28.832 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-05T23:44:28.832 INFO:teuthology.orchestra.run.vm02.stdout: "Devices":[ 2026-03-05T23:44:28.832 INFO:teuthology.orchestra.run.vm02.stdout: { 2026-03-05T23:44:28.832 INFO:teuthology.orchestra.run.vm02.stdout: "NameSpace":1, 2026-03-05T23:44:28.832 INFO:teuthology.orchestra.run.vm02.stdout: "DevicePath":"/dev/nvme0n1", 2026-03-05T23:44:28.832 INFO:teuthology.orchestra.run.vm02.stdout: "GenericPath":"/dev/ng0n1", 2026-03-05T23:44:28.832 INFO:teuthology.orchestra.run.vm02.stdout: "Firmware":"5.14.0-6", 2026-03-05T23:44:28.832 INFO:teuthology.orchestra.run.vm02.stdout: "ModelNumber":"Linux", 2026-03-05T23:44:28.832 INFO:teuthology.orchestra.run.vm02.stdout: "SerialNumber":"e18a0d8fcd3e7a054808", 2026-03-05T23:44:28.832 INFO:teuthology.orchestra.run.vm02.stdout: "UsedBytes":21474836480, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "MaximumLBA":41943040, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "PhysicalSize":21474836480, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "SectorSize":512 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: { 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "NameSpace":1, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "DevicePath":"/dev/nvme1n1", 
2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "GenericPath":"/dev/ng1n1", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "Firmware":"5.14.0-6", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "ModelNumber":"Linux", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "SerialNumber":"6bd7b4541227a763c307", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "UsedBytes":21474836480, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "MaximumLBA":41943040, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "PhysicalSize":21474836480, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "SectorSize":512 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: { 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "NameSpace":1, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "DevicePath":"/dev/nvme2n1", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "GenericPath":"/dev/ng2n1", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "Firmware":"5.14.0-6", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "ModelNumber":"Linux", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "SerialNumber":"562a6caece83ca09b9ff", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "UsedBytes":21474836480, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "MaximumLBA":41943040, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "PhysicalSize":21474836480, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "SectorSize":512 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: { 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "NameSpace":1, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "DevicePath":"/dev/nvme3n1", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "GenericPath":"/dev/ng3n1", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "Firmware":"5.14.0-6", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "ModelNumber":"Linux", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "SerialNumber":"36ecd4d8f1a9c5e4ec41", 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "UsedBytes":21474836480, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "MaximumLBA":41943040, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "PhysicalSize":21474836480, 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: "SectorSize":512 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout: ] 2026-03-05T23:44:28.833 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-05T23:44:28.834 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096 2026-03-05T23:44:28.878 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:28.878 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:28.878 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00868825 s, 471 kB/s 2026-03-05T23:44:28.879 DEBUG:teuthology.orchestra.run.vm02:> sudo 
hexdump -n22 -C -s0 /dev/nvme0n1 2026-03-05T23:44:28.920 INFO:teuthology.orchestra.run.vm02.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:28.920 INFO:teuthology.orchestra.run.vm02.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:28.920 INFO:teuthology.orchestra.run.vm02.stdout:00000016 2026-03-05T23:44:28.923 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096 2026-03-05T23:44:29.009 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:29.010 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:29.010 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00870236 s, 471 kB/s 2026-03-05T23:44:29.014 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1 2026-03-05T23:44:29.050 INFO:teuthology.orchestra.run.vm02.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:29.050 INFO:teuthology.orchestra.run.vm02.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:29.050 INFO:teuthology.orchestra.run.vm02.stdout:40000016 2026-03-05T23:44:29.052 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096 2026-03-05T23:44:29.132 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:29.133 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:29.133 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00405108 s, 1.0 MB/s 2026-03-05T23:44:29.133 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1 2026-03-05T23:44:29.208 INFO:teuthology.orchestra.run.vm02.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:29.208 INFO:teuthology.orchestra.run.vm02.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:29.208 INFO:teuthology.orchestra.run.vm02.stdout:280000016 2026-03-05T23:44:29.209 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096 2026-03-05T23:44:29.304 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:29.305 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:29.305 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0106975 s, 383 kB/s 2026-03-05T23:44:29.306 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s0 /dev/nvme1n1 2026-03-05T23:44:29.346 INFO:teuthology.orchestra.run.vm02.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:29.346 INFO:teuthology.orchestra.run.vm02.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:29.346 INFO:teuthology.orchestra.run.vm02.stdout:00000016 2026-03-05T23:44:29.347 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096 2026-03-05T23:44:29.427 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:29.427 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:29.427 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00440421 s, 930 kB/s 2026-03-05T23:44:29.428 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-03-05T23:44:29.499 INFO:teuthology.orchestra.run.vm02.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
00 |................| 2026-03-05T23:44:29.499 INFO:teuthology.orchestra.run.vm02.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:29.499 INFO:teuthology.orchestra.run.vm02.stdout:40000016 2026-03-05T23:44:29.500 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-03-05T23:44:29.588 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:29.588 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:29.588 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0102924 s, 398 kB/s 2026-03-05T23:44:29.590 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-03-05T23:44:29.620 INFO:teuthology.orchestra.run.vm02.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:29.620 INFO:teuthology.orchestra.run.vm02.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:29.620 INFO:teuthology.orchestra.run.vm02.stdout:280000016 2026-03-05T23:44:29.621 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-03-05T23:44:29.701 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:29.701 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:29.701 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0040072 s, 1.0 MB/s 2026-03-05T23:44:29.702 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-03-05T23:44:29.781 INFO:teuthology.orchestra.run.vm02.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:29.782 INFO:teuthology.orchestra.run.vm02.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:29.782 INFO:teuthology.orchestra.run.vm02.stdout:00000016 2026-03-05T23:44:29.783 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-03-05T23:44:29.861 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:29.861 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:29.861 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00386154 s, 1.1 MB/s 2026-03-05T23:44:29.864 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-03-05T23:44:29.936 INFO:teuthology.orchestra.run.vm02.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:29.937 INFO:teuthology.orchestra.run.vm02.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:29.937 INFO:teuthology.orchestra.run.vm02.stdout:40000016 2026-03-05T23:44:29.938 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-03-05T23:44:30.020 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:30.020 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:30.020 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00834932 s, 491 kB/s 2026-03-05T23:44:30.021 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1 2026-03-05T23:44:30.108 INFO:teuthology.orchestra.run.vm02.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:30.108 INFO:teuthology.orchestra.run.vm02.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:30.108 
INFO:teuthology.orchestra.run.vm02.stdout:280000016 2026-03-05T23:44:30.109 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096 2026-03-05T23:44:30.148 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:30.148 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:30.148 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00716339 s, 572 kB/s 2026-03-05T23:44:30.149 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s0 /dev/nvme3n1 2026-03-05T23:44:30.221 INFO:teuthology.orchestra.run.vm02.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:30.221 INFO:teuthology.orchestra.run.vm02.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:30.221 INFO:teuthology.orchestra.run.vm02.stdout:00000016 2026-03-05T23:44:30.222 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096 2026-03-05T23:44:30.297 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:30.297 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:30.297 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00517714 s, 791 kB/s 2026-03-05T23:44:30.300 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1 2026-03-05T23:44:30.373 INFO:teuthology.orchestra.run.vm02.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:30.373 INFO:teuthology.orchestra.run.vm02.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:30.373 INFO:teuthology.orchestra.run.vm02.stdout:40000016 2026-03-05T23:44:30.375 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096 2026-03-05T23:44:30.451 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records in 2026-03-05T23:44:30.451 INFO:teuthology.orchestra.run.vm02.stderr:4096+0 records out 2026-03-05T23:44:30.451 INFO:teuthology.orchestra.run.vm02.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00421839 s, 971 kB/s 2026-03-05T23:44:30.452 DEBUG:teuthology.orchestra.run.vm02:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1 2026-03-05T23:44:30.524 INFO:teuthology.orchestra.run.vm02.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:30.524 INFO:teuthology.orchestra.run.vm02.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:30.524 INFO:teuthology.orchestra.run.vm02.stdout:280000016 2026-03-05T23:44:30.525 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-03-05T23:44:30.525 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-05T23:44:30.525 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/scratch_devs 2026-03-05T23:44:30.607 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-05T23:44:30.607 DEBUG:teuthology.orchestra.run.vm09:> dd if=/scratch_devs of=/dev/stdout 2026-03-05T23:44:30.630 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-05T23:44:30.630 DEBUG:teuthology.orchestra.run.vm09:> ls /dev/[sv]d? 
2026-03-05T23:44:30.693 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vda 2026-03-05T23:44:30.693 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdb 2026-03-05T23:44:30.693 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdc 2026-03-05T23:44:30.693 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdd 2026-03-05T23:44:30.693 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vde 2026-03-05T23:44:30.693 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-05T23:44:30.693 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-05T23:44:30.693 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdb 2026-03-05T23:44:30.761 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdb 2026-03-05T23:44:30.761 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:44:30.761 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-05T23:44:30.761 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:44:30.761 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:44:30.761 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-05 23:44:26.050144106 +0100 2026-03-05T23:44:30.761 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-05 23:44:26.050144106 +0100 2026-03-05T23:44:30.761 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-05 23:44:26.050144106 +0100 2026-03-05T23:44:30.761 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-05 23:37:32.274000000 +0100 2026-03-05T23:44:30.761 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-05T23:44:30.837 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-05T23:44:30.837 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-05T23:44:30.837 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000135593 s, 3.8 MB/s 2026-03-05T23:44:30.838 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-05T23:44:30.903 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdc 2026-03-05T23:44:30.966 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdc 2026-03-05T23:44:30.966 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:44:30.966 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-05T23:44:30.966 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:44:30.966 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:44:30.966 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-05 23:44:26.045144104 +0100 2026-03-05T23:44:30.966 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-05 23:44:26.045144104 +0100 2026-03-05T23:44:30.966 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-05 23:44:26.045144104 +0100 2026-03-05T23:44:30.966 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-05 23:37:32.279000000 +0100 2026-03-05T23:44:30.966 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-05T23:44:31.038 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-05T23:44:31.039 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-05T23:44:31.039 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.00015487 s, 3.3 MB/s 2026-03-05T23:44:31.040 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-05T23:44:31.108 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdd 2026-03-05T23:44:31.179 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdd 2026-03-05T23:44:31.179 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:44:31.179 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-05T23:44:31.179 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:44:31.179 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:44:31.179 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-05 23:44:26.077144115 +0100 2026-03-05T23:44:31.179 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-05 23:44:26.077144115 +0100 2026-03-05T23:44:31.179 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-05 23:44:26.077144115 +0100 2026-03-05T23:44:31.179 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-05 23:37:32.290000000 +0100 2026-03-05T23:44:31.179 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-05T23:44:31.249 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-05T23:44:31.249 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-05T23:44:31.249 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.00014386 s, 3.6 MB/s 2026-03-05T23:44:31.251 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-05T23:44:31.315 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vde 2026-03-05T23:44:31.376 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vde 2026-03-05T23:44:31.376 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:44:31.376 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-05T23:44:31.376 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:44:31.376 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:44:31.376 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-05 23:44:26.032144099 +0100 2026-03-05T23:44:31.376 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-05 23:44:26.032144099 +0100 2026-03-05T23:44:31.376 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-05 23:44:26.032144099 +0100 2026-03-05T23:44:31.376 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-05 23:37:32.294000000 +0100 2026-03-05T23:44:31.376 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-05T23:44:31.446 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-05T23:44:31.447 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-05T23:44:31.447 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000135133 s, 3.8 MB/s 2026-03-05T23:44:31.448 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-05T23:44:31.515 DEBUG:teuthology.orchestra.run.vm09:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype 2026-03-05T23:44:31.673 INFO:teuthology.orchestra.run.vm09.stdout:loop 2026-03-05T23:44:31.675 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vdb... 2026-03-05T23:44:31.675 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdb && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1 && echo -n /dev/vdb | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdb/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdb /sys/kernel/config/nvmet/ports/1/subsystems/vdb && sudo nvme connect -t loop -n vdb -q hostnqn 2026-03-05T23:44:31.713 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-03-05T23:44:31.748 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdb1 2026-03-05T23:44:31.782 INFO:teuthology.orchestra.run.vm09.stdout:connecting to device: nvme0 2026-03-05T23:44:31.786 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vdc... 
2026-03-05T23:44:31.786 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdc && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1 && echo -n /dev/vdc | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdc/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdc /sys/kernel/config/nvmet/ports/1/subsystems/vdc && sudo nvme connect -t loop -n vdc -q hostnqn 2026-03-05T23:44:31.874 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-03-05T23:44:31.910 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdc1 2026-03-05T23:44:31.944 INFO:teuthology.orchestra.run.vm09.stdout:connecting to device: nvme1 2026-03-05T23:44:31.947 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vdd... 2026-03-05T23:44:31.947 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdd && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1 && echo -n /dev/vdd | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vdd/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vdd /sys/kernel/config/nvmet/ports/1/subsystems/vdd && sudo nvme connect -t loop -n vdd -q hostnqn 2026-03-05T23:44:31.987 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-03-05T23:44:32.017 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdd1 2026-03-05T23:44:32.042 INFO:teuthology.orchestra.run.vm09.stdout:connecting to device: nvme2 2026-03-05T23:44:32.045 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vde... 
2026-03-05T23:44:32.045 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vde && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vde/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/vde/namespaces/1 && echo -n /dev/vde | sudo tee /sys/kernel/config/nvmet/subsystems/vde/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/vde/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/vde /sys/kernel/config/nvmet/ports/1/subsystems/vde && sudo nvme connect -t loop -n vde -q hostnqn 2026-03-05T23:44:32.125 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-03-05T23:44:32.156 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vde1 2026-03-05T23:44:32.182 INFO:teuthology.orchestra.run.vm09.stdout:connecting to device: nvme3 2026-03-05T23:44:32.186 DEBUG:teuthology.orchestra.run.vm09:> lsblk 2026-03-05T23:44:32.248 INFO:teuthology.orchestra.run.vm09.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS 2026-03-05T23:44:32.248 INFO:teuthology.orchestra.run.vm09.stdout:sr0 11:0 1 366K 0 rom 2026-03-05T23:44:32.248 INFO:teuthology.orchestra.run.vm09.stdout:vda 252:0 0 40G 0 disk 2026-03-05T23:44:32.248 INFO:teuthology.orchestra.run.vm09.stdout:└─vda1 252:1 0 40G 0 part / 2026-03-05T23:44:32.249 INFO:teuthology.orchestra.run.vm09.stdout:vdb 252:16 0 20G 0 disk 2026-03-05T23:44:32.249 INFO:teuthology.orchestra.run.vm09.stdout:vdc 252:32 0 20G 0 disk 2026-03-05T23:44:32.249 INFO:teuthology.orchestra.run.vm09.stdout:vdd 252:48 0 20G 0 disk 2026-03-05T23:44:32.249 INFO:teuthology.orchestra.run.vm09.stdout:vde 252:64 0 20G 0 disk 2026-03-05T23:44:32.249 INFO:teuthology.orchestra.run.vm09.stdout:nvme0n1 259:1 0 20G 0 disk 2026-03-05T23:44:32.249 INFO:teuthology.orchestra.run.vm09.stdout:nvme1n1 259:3 0 20G 0 disk 2026-03-05T23:44:32.249 INFO:teuthology.orchestra.run.vm09.stdout:nvme2n1 259:5 0 20G 0 disk 2026-03-05T23:44:32.249 INFO:teuthology.orchestra.run.vm09.stdout:nvme3n1 259:7 0 20G 0 disk 2026-03-05T23:44:32.249 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme list -o json 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout:{ 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "Devices":[ 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace":1, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath":"/dev/nvme0n1", 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "GenericPath":"/dev/ng0n1", 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware":"5.14.0-6", 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber":"Linux", 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber":"0b9d422655091c925040", 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes":21474836480, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA":41943040, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize":21474836480, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize":512 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: }, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace":1, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath":"/dev/nvme1n1", 
2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "GenericPath":"/dev/ng1n1", 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware":"5.14.0-6", 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber":"Linux", 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber":"5b9cd27a59e8b4e6f38b", 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes":21474836480, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA":41943040, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize":21474836480, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize":512 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: }, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace":1, 2026-03-05T23:44:32.321 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath":"/dev/nvme2n1", 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "GenericPath":"/dev/ng2n1", 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware":"5.14.0-6", 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber":"Linux", 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber":"897917275fb9baa4e6f7", 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes":21474836480, 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA":41943040, 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize":21474836480, 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize":512 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: }, 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace":1, 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath":"/dev/nvme3n1", 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "GenericPath":"/dev/ng3n1", 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware":"5.14.0-6", 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber":"Linux", 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber":"34e34a17642618782c4d", 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes":21474836480, 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA":41943040, 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize":21474836480, 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize":512 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: } 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout: ] 2026-03-05T23:44:32.322 INFO:teuthology.orchestra.run.vm09.stdout:} 2026-03-05T23:44:32.322 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096 2026-03-05T23:44:32.397 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:32.397 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:32.397 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00515933 s, 794 kB/s 2026-03-05T23:44:32.400 DEBUG:teuthology.orchestra.run.vm09:> sudo 
hexdump -n22 -C -s0 /dev/nvme0n1 2026-03-05T23:44:32.472 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:32.472 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:32.472 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-03-05T23:44:32.473 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096 2026-03-05T23:44:32.555 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:32.555 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:32.556 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00514294 s, 796 kB/s 2026-03-05T23:44:32.560 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1 2026-03-05T23:44:32.633 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:32.633 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:32.633 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-03-05T23:44:32.634 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096 2026-03-05T23:44:32.707 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:32.707 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:32.707 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00573984 s, 714 kB/s 2026-03-05T23:44:32.712 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1 2026-03-05T23:44:32.784 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:32.784 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:32.784 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-03-05T23:44:32.785 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096 2026-03-05T23:44:32.859 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:32.859 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:32.860 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00486425 s, 842 kB/s 2026-03-05T23:44:32.864 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s0 /dev/nvme1n1 2026-03-05T23:44:32.941 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:32.941 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:32.941 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-03-05T23:44:32.942 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096 2026-03-05T23:44:33.016 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:33.016 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:33.016 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00414718 s, 988 kB/s 2026-03-05T23:44:33.017 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-03-05T23:44:33.091 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
00 00 |................| 2026-03-05T23:44:33.092 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:33.092 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-03-05T23:44:33.093 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-03-05T23:44:33.174 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:33.174 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:33.174 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00532904 s, 769 kB/s 2026-03-05T23:44:33.180 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-03-05T23:44:33.251 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:33.251 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:33.251 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-03-05T23:44:33.253 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-03-05T23:44:33.331 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:33.331 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:33.331 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00489663 s, 836 kB/s 2026-03-05T23:44:33.336 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-03-05T23:44:33.413 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:33.413 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:33.413 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-03-05T23:44:33.414 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-03-05T23:44:33.492 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:33.492 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:33.492 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00388309 s, 1.1 MB/s 2026-03-05T23:44:33.493 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-03-05T23:44:33.564 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:33.564 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:33.564 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-03-05T23:44:33.565 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-03-05T23:44:33.647 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:33.647 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:33.647 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0055102 s, 743 kB/s 2026-03-05T23:44:33.652 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1 2026-03-05T23:44:33.723 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:33.723 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 
2026-03-05T23:44:33.723 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-03-05T23:44:33.724 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096 2026-03-05T23:44:33.796 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:33.796 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:33.796 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00510601 s, 802 kB/s 2026-03-05T23:44:33.801 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s0 /dev/nvme3n1 2026-03-05T23:44:33.868 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:33.868 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:33.868 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-03-05T23:44:33.869 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096 2026-03-05T23:44:33.946 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:33.946 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:33.946 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00634922 s, 645 kB/s 2026-03-05T23:44:33.953 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1 2026-03-05T23:44:34.034 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:34.034 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:34.034 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-03-05T23:44:34.035 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096 2026-03-05T23:44:34.111 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-03-05T23:44:34.111 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-03-05T23:44:34.111 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0036367 s, 1.1 MB/s 2026-03-05T23:44:34.113 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1 2026-03-05T23:44:34.181 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-03-05T23:44:34.181 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 2026-03-05T23:44:34.181 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-03-05T23:44:34.182 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-03-05T23:44:34.182 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-05T23:44:34.182 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/scratch_devs 2026-03-05T23:44:34.251 INFO:teuthology.run_tasks:Running task cephadm... 
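
The nvme_loop task above drives the kernel nvmet loop target through configfs on both hosts: each scratch disk (/dev/vdb..vde) gets a subsystem with any-host access, a single namespace pointing at the block device, a link into loop port 1, and finally `nvme connect -t loop`, which surfaces it as a /dev/nvmeXn1 node. A minimal Python sketch of that per-device sequence follows; it assumes it runs as root on one host, and run() is an illustrative helper rather than teuthology's actual API.

# Sketch of the nvmet loop-target setup performed by the nvme_loop task above.
# Paths mirror the configfs layout in the log; run() is a hypothetical helper.
import subprocess

NVMET = "/sys/kernel/config/nvmet"

def run(cmd: str) -> None:
    # Run a shell command and fail loudly, like the task's remote runs.
    subprocess.run(cmd, shell=True, check=True)

def setup_port() -> None:
    # One-time setup: load the module and create a loop-transport port.
    run("modprobe nvme_loop")
    run(f"mkdir -p {NVMET}/hosts/hostnqn {NVMET}/ports/1")
    run(f"echo loop > {NVMET}/ports/1/addr_trtype")

def export_as_nvme(dev: str) -> None:
    # Export one block device (e.g. /dev/vdb) as an NVMe namespace over the
    # loop transport, then connect to it so a /dev/nvmeXn1 node appears.
    name = dev.split("/")[-1]                      # "vdb"
    subsys = f"{NVMET}/subsystems/{name}"
    run(f"mkdir -p {subsys}")
    run(f"echo 1 > {subsys}/attr_allow_any_host")
    run(f"mkdir -p {subsys}/namespaces/1")
    run(f"echo -n {dev} > {subsys}/namespaces/1/device_path")
    run(f"echo 1 > {subsys}/namespaces/1/enable")
    run(f"ln -s {subsys} {NVMET}/ports/1/subsystems/{name}")
    run(f"nvme connect -t loop -n {name} -q hostnqn")

if __name__ == "__main__":
    setup_port()
    for dev in ("/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"):
        export_as_nvme(dev)

The dd/hexdump probes logged above then overwrite and read back a few bytes at offsets 0, 1 GiB and 10 GiB of each new namespace as a quick check that the loop-backed devices are writable before cephadm is handed the list in new_devs.
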
2026-03-05T23:44:34.302 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'OSD_DOWN', 'CEPHADM_FAILED_DAEMON', 'but is still running', 'PG_DEGRADED'], 'log-only-match': ['CEPHADM_'], 'sha1': 'c24117fd5525679b799527bc1bd1f1dd0a2db5e2', 'cephadm_binary_url': 'https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm', 'containers': {'image': 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3'}} 2026-03-05T23:44:34.302 INFO:tasks.cephadm:Provided image contains tag or digest, using it as is 2026-03-05T23:44:34.302 INFO:tasks.cephadm:Cluster image is harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 2026-03-05T23:44:34.303 INFO:tasks.cephadm:Cluster fsid is e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:44:34.303 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-03-05T23:44:34.303 INFO:tasks.cephadm:No mon roles; fabricating mons 2026-03-05T23:44:34.303 INFO:tasks.cephadm:Monitor IPs: {'mon.vm02': '192.168.123.102', 'mon.vm09': '192.168.123.109'} 2026-03-05T23:44:34.303 INFO:tasks.cephadm:Normalizing hostnames... 2026-03-05T23:44:34.303 DEBUG:teuthology.orchestra.run.vm02:> sudo hostname $(hostname -s) 2026-03-05T23:44:34.341 DEBUG:teuthology.orchestra.run.vm09:> sudo hostname $(hostname -s) 2026-03-05T23:44:34.372 INFO:tasks.cephadm:Downloading cephadm from url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm 2026-03-05T23:44:34.372 DEBUG:teuthology.orchestra.run.vm02:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-05T23:44:35.486 INFO:teuthology.orchestra.run.vm02.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar 5 23:44 /home/ubuntu/cephtest/cephadm 2026-03-05T23:44:35.486 DEBUG:teuthology.orchestra.run.vm09:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-05T23:44:36.542 INFO:teuthology.orchestra.run.vm09.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar 5 23:44 /home/ubuntu/cephtest/cephadm 2026-03-05T23:44:36.543 DEBUG:teuthology.orchestra.run.vm02:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-05T23:44:36.568 DEBUG:teuthology.orchestra.run.vm09:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-05T23:44:36.594 INFO:tasks.cephadm:Pulling image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 on all hosts... 2026-03-05T23:44:36.594 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 pull 2026-03-05T23:44:36.610 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 pull 2026-03-05T23:44:37.039 INFO:teuthology.orchestra.run.vm09.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3... 
2026-03-05T23:44:37.069 INFO:teuthology.orchestra.run.vm02.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3... 2026-03-05T23:45:01.666 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-05T23:45:01.666 INFO:teuthology.orchestra.run.vm02.stdout: "ceph_version": "ceph version 19.2.3-47-gc24117fd552 (c24117fd5525679b799527bc1bd1f1dd0a2db5e2) squid (stable)", 2026-03-05T23:45:01.666 INFO:teuthology.orchestra.run.vm02.stdout: "image_id": "306e97de47e91c2b4b24d3dc09be3b3a12039b078f343d91220102acc6628a68", 2026-03-05T23:45:01.666 INFO:teuthology.orchestra.run.vm02.stdout: "repo_digests": [ 2026-03-05T23:45:01.666 INFO:teuthology.orchestra.run.vm02.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b" 2026-03-05T23:45:01.666 INFO:teuthology.orchestra.run.vm02.stdout: ] 2026-03-05T23:45:01.666 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-05T23:45:03.984 INFO:teuthology.orchestra.run.vm09.stdout:{ 2026-03-05T23:45:03.984 INFO:teuthology.orchestra.run.vm09.stdout: "ceph_version": "ceph version 19.2.3-47-gc24117fd552 (c24117fd5525679b799527bc1bd1f1dd0a2db5e2) squid (stable)", 2026-03-05T23:45:03.984 INFO:teuthology.orchestra.run.vm09.stdout: "image_id": "306e97de47e91c2b4b24d3dc09be3b3a12039b078f343d91220102acc6628a68", 2026-03-05T23:45:03.984 INFO:teuthology.orchestra.run.vm09.stdout: "repo_digests": [ 2026-03-05T23:45:03.984 INFO:teuthology.orchestra.run.vm09.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b" 2026-03-05T23:45:03.984 INFO:teuthology.orchestra.run.vm09.stdout: ] 2026-03-05T23:45:03.984 INFO:teuthology.orchestra.run.vm09.stdout:} 2026-03-05T23:45:04.013 DEBUG:teuthology.orchestra.run.vm02:> sudo mkdir -p /etc/ceph 2026-03-05T23:45:04.045 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /etc/ceph 2026-03-05T23:45:04.087 DEBUG:teuthology.orchestra.run.vm02:> sudo chmod 777 /etc/ceph 2026-03-05T23:45:04.118 DEBUG:teuthology.orchestra.run.vm09:> sudo chmod 777 /etc/ceph 2026-03-05T23:45:04.168 INFO:tasks.cephadm:Writing seed config... 
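
The cephadm task above fetches the standalone cephadm binary from the job's cephadm_binary_url on each host, checks that the download is non-trivially sized, and marks it executable before using it to pull the cluster image. A sketch of that fetch-and-check step, assuming the URL and destination path from this job and substituting urllib for the logged curl invocation:

# Sketch of the cephadm fetch-and-sanity-check step logged above: download the
# standalone binary, refuse anything implausibly small, then mark it executable.
import os
import stat
import urllib.request

URL = "https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm"
DEST = "/home/ubuntu/cephtest/cephadm"

def fetch_cephadm(url: str = URL, dest: str = DEST) -> str:
    urllib.request.urlretrieve(url, dest)          # follows redirects like curl -L
    size = os.path.getsize(dest)
    if size <= 1000:                               # same threshold as the logged test
        raise RuntimeError(f"cephadm download looks truncated ({size} bytes)")
    os.chmod(dest, os.stat(dest).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    return dest
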
2026-03-05T23:45:04.169 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-05T23:45:04.169 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-05T23:45:04.169 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-05T23:45:04.169 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-05T23:45:04.169 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-05T23:45:04.169 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-05T23:45:04.169 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-05T23:45:04.169 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-05T23:45:04.169 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True
2026-03-05T23:45:04.170 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-05T23:45:04.170 DEBUG:teuthology.orchestra.run.vm02:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-05T23:45:04.191 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000
# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true
# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
# tests delete pools
mon allow pool delete = true
fsid = e1ad3122-18e4-11f1-9926-f7644c158a97
[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true
# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = True
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000
[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m
# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20
[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
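
The seed config above is the stock teuthology ceph.conf with the job's per-section overrides (the "override:" lines) folded in, section by section, later values winning within a section. A purely illustrative sketch of that merge, not the task's actual code; the base dict here is a stand-in for the full template:

# Illustrative sketch of how per-section overrides end up in the seed ceph.conf.
def merge_overrides(base: dict, overrides: dict) -> dict:
    # Copy the base config, then let each override section update or extend it.
    merged = {sec: dict(opts) for sec, opts in base.items()}
    for sec, opts in overrides.items():
        merged.setdefault(sec, {}).update(opts)
    return merged

def render_ceph_conf(conf: dict) -> str:
    # Render {"osd": {"debug osd": "20"}} as INI-style ceph.conf text.
    out = []
    for sec, opts in conf.items():
        out.append(f"[{sec}]")
        out.extend(f"{key} = {val}" for key, val in opts.items())
        out.append("")
    return "\n".join(out)

base = {"global": {"fsid": "e1ad3122-18e4-11f1-9926-f7644c158a97"},
        "osd": {"osd op queue": "debug_random"}}
overrides = {
    "mgr": {"debug mgr": "20", "debug ms": "1"},
    "mon": {"debug mon": "20", "debug ms": "1", "debug paxos": "20"},
    "osd": {"debug ms": "1", "debug osd": "20",
            "osd mclock iops capacity threshold hdd": "49000",
            "osd shutdown pgref assert": "true"},
}
print(render_ceph_conf(merge_overrides(base, overrides)))
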
2026-03-05T23:45:04.191 DEBUG:teuthology.orchestra.run.vm02:mon.vm02> sudo journalctl -f -n 0 -u ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm02.service 2026-03-05T23:45:04.235 INFO:tasks.cephadm:Bootstrapping... 2026-03-05T23:45:04.235 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 -v bootstrap --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.102 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring 2026-03-05T23:45:04.567 INFO:teuthology.orchestra.run.vm02.stdout:-------------------------------------------------------------------------------- 2026-03-05T23:45:04.568 INFO:teuthology.orchestra.run.vm02.stdout:cephadm ['--image', 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3', '-v', 'bootstrap', '--fsid', 'e1ad3122-18e4-11f1-9926-f7644c158a97', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.102', '--skip-admin-label'] 2026-03-05T23:45:04.568 INFO:teuthology.orchestra.run.vm02.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts. 2026-03-05T23:45:04.568 INFO:teuthology.orchestra.run.vm02.stdout:Verifying podman|docker is present... 2026-03-05T23:45:04.600 INFO:teuthology.orchestra.run.vm02.stdout:/bin/podman: stdout 5.8.0 2026-03-05T23:45:04.600 INFO:teuthology.orchestra.run.vm02.stdout:Verifying lvm2 is present... 2026-03-05T23:45:04.600 INFO:teuthology.orchestra.run.vm02.stdout:Verifying time synchronization is in place... 2026-03-05T23:45:04.611 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-05T23:45:04.611 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-05T23:45:04.618 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-05T23:45:04.618 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout inactive 2026-03-05T23:45:04.625 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout enabled 2026-03-05T23:45:04.632 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout active 2026-03-05T23:45:04.632 INFO:teuthology.orchestra.run.vm02.stdout:Unit chronyd.service is enabled and running 2026-03-05T23:45:04.632 INFO:teuthology.orchestra.run.vm02.stdout:Repeating the final host check... 
2026-03-05T23:45:04.657 INFO:teuthology.orchestra.run.vm02.stdout:/bin/podman: stdout 5.8.0 2026-03-05T23:45:04.657 INFO:teuthology.orchestra.run.vm02.stdout:podman (/bin/podman) version 5.8.0 is present 2026-03-05T23:45:04.658 INFO:teuthology.orchestra.run.vm02.stdout:systemctl is present 2026-03-05T23:45:04.658 INFO:teuthology.orchestra.run.vm02.stdout:lvcreate is present 2026-03-05T23:45:04.670 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-05T23:45:04.670 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-05T23:45:04.682 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-05T23:45:04.682 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout inactive 2026-03-05T23:45:04.691 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout enabled 2026-03-05T23:45:04.700 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout active 2026-03-05T23:45:04.700 INFO:teuthology.orchestra.run.vm02.stdout:Unit chronyd.service is enabled and running 2026-03-05T23:45:04.700 INFO:teuthology.orchestra.run.vm02.stdout:Host looks OK 2026-03-05T23:45:04.700 INFO:teuthology.orchestra.run.vm02.stdout:Cluster fsid: e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:45:04.700 INFO:teuthology.orchestra.run.vm02.stdout:Acquiring lock 140687404944976 on /run/cephadm/e1ad3122-18e4-11f1-9926-f7644c158a97.lock 2026-03-05T23:45:04.700 INFO:teuthology.orchestra.run.vm02.stdout:Lock 140687404944976 acquired on /run/cephadm/e1ad3122-18e4-11f1-9926-f7644c158a97.lock 2026-03-05T23:45:04.700 INFO:teuthology.orchestra.run.vm02.stdout:Verifying IP 192.168.123.102 port 3300 ... 2026-03-05T23:45:04.701 INFO:teuthology.orchestra.run.vm02.stdout:Verifying IP 192.168.123.102 port 6789 ... 
2026-03-05T23:45:04.701 INFO:teuthology.orchestra.run.vm02.stdout:Base mon IP(s) is [192.168.123.102:3300, 192.168.123.102:6789], mon addrv is [v2:192.168.123.102:3300,v1:192.168.123.102:6789] 2026-03-05T23:45:04.706 INFO:teuthology.orchestra.run.vm02.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.102 metric 100 2026-03-05T23:45:04.706 INFO:teuthology.orchestra.run.vm02.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.102 metric 100 2026-03-05T23:45:04.711 INFO:teuthology.orchestra.run.vm02.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium 2026-03-05T23:45:04.711 INFO:teuthology.orchestra.run.vm02.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-03-05T23:45:04.715 INFO:teuthology.orchestra.run.vm02.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-03-05T23:45:04.715 INFO:teuthology.orchestra.run.vm02.stdout:/sbin/ip: stdout inet6 ::1/128 scope host 2026-03-05T23:45:04.715 INFO:teuthology.orchestra.run.vm02.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-05T23:45:04.715 INFO:teuthology.orchestra.run.vm02.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000 2026-03-05T23:45:04.715 INFO:teuthology.orchestra.run.vm02.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:2/64 scope link noprefixroute 2026-03-05T23:45:04.715 INFO:teuthology.orchestra.run.vm02.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-05T23:45:04.716 INFO:teuthology.orchestra.run.vm02.stdout:Mon IP `192.168.123.102` is in CIDR network `192.168.123.0/24` 2026-03-05T23:45:04.716 INFO:teuthology.orchestra.run.vm02.stdout:Mon IP `192.168.123.102` is in CIDR network `192.168.123.0/24` 2026-03-05T23:45:04.716 INFO:teuthology.orchestra.run.vm02.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24'] 2026-03-05T23:45:04.716 INFO:teuthology.orchestra.run.vm02.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-03-05T23:45:04.717 INFO:teuthology.orchestra.run.vm02.stdout:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3... 2026-03-05T23:45:05.340 INFO:teuthology.orchestra.run.vm02.stdout:/bin/podman: stdout 306e97de47e91c2b4b24d3dc09be3b3a12039b078f343d91220102acc6628a68 2026-03-05T23:45:05.340 INFO:teuthology.orchestra.run.vm02.stdout:/bin/podman: stderr Trying to pull harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3... 
2026-03-05T23:45:05.340 INFO:teuthology.orchestra.run.vm02.stdout:/bin/podman: stderr Getting image source signatures 2026-03-05T23:45:05.340 INFO:teuthology.orchestra.run.vm02.stdout:/bin/podman: stderr Copying blob sha256:d21d4233fd3d4dd2f376e5ef084c47891c860682c1de15a9c0357cea5defbc91 2026-03-05T23:45:05.340 INFO:teuthology.orchestra.run.vm02.stdout:/bin/podman: stderr Copying config sha256:306e97de47e91c2b4b24d3dc09be3b3a12039b078f343d91220102acc6628a68 2026-03-05T23:45:05.340 INFO:teuthology.orchestra.run.vm02.stdout:/bin/podman: stderr Writing manifest to image destination 2026-03-05T23:45:05.599 INFO:teuthology.orchestra.run.vm02.stdout:ceph: stdout ceph version 19.2.3-47-gc24117fd552 (c24117fd5525679b799527bc1bd1f1dd0a2db5e2) squid (stable) 2026-03-05T23:45:05.599 INFO:teuthology.orchestra.run.vm02.stdout:Ceph version: ceph version 19.2.3-47-gc24117fd552 (c24117fd5525679b799527bc1bd1f1dd0a2db5e2) squid (stable) 2026-03-05T23:45:05.599 INFO:teuthology.orchestra.run.vm02.stdout:Extracting ceph user uid/gid from container image... 2026-03-05T23:45:05.716 INFO:teuthology.orchestra.run.vm02.stdout:stat: stdout 167 167 2026-03-05T23:45:05.716 INFO:teuthology.orchestra.run.vm02.stdout:Creating initial keys... 2026-03-05T23:45:05.940 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-authtool: stdout AQBxB6ppdvUcMBAA5IjFJk5JH7UwENITuwH+tw== 2026-03-05T23:45:06.076 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-authtool: stdout AQByB6pp/El/ARAA9Q8zJw76Rgk9uWLqKhd1KA== 2026-03-05T23:45:06.185 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-authtool: stdout AQByB6ppSjGpCRAA23UP0QeulahZhlhKNEL8qQ== 2026-03-05T23:45:06.185 INFO:teuthology.orchestra.run.vm02.stdout:Creating initial monmap... 2026-03-05T23:45:06.326 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-05T23:45:06.326 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy 2026-03-05T23:45:06.326 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:45:06.326 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-05T23:45:06.326 INFO:teuthology.orchestra.run.vm02.stdout:monmaptool for vm02 [v2:192.168.123.102:3300,v1:192.168.123.102:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-05T23:45:06.326 INFO:teuthology.orchestra.run.vm02.stdout:setting min_mon_release = quincy 2026-03-05T23:45:06.326 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: set fsid to e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:45:06.326 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-05T23:45:06.326 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:45:06.326 INFO:teuthology.orchestra.run.vm02.stdout:Creating mon... 2026-03-05T23:45:06.477 INFO:teuthology.orchestra.run.vm02.stdout:create mon.vm02 on 2026-03-05T23:45:06.835 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 
2026-03-05T23:45:07.018 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-e1ad3122-18e4-11f1-9926-f7644c158a97.target → /etc/systemd/system/ceph-e1ad3122-18e4-11f1-9926-f7644c158a97.target. 2026-03-05T23:45:07.018 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-e1ad3122-18e4-11f1-9926-f7644c158a97.target → /etc/systemd/system/ceph-e1ad3122-18e4-11f1-9926-f7644c158a97.target. 2026-03-05T23:45:07.209 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm02 2026-03-05T23:45:07.209 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to reset failed state of unit ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm02.service: Unit ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm02.service not loaded. 2026-03-05T23:45:07.402 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-e1ad3122-18e4-11f1-9926-f7644c158a97.target.wants/ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm02.service → /etc/systemd/system/ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@.service. 2026-03-05T23:45:07.614 INFO:teuthology.orchestra.run.vm02.stdout:firewalld does not appear to be present 2026-03-05T23:45:07.614 INFO:teuthology.orchestra.run.vm02.stdout:Not possible to enable service . firewalld.service is not available 2026-03-05T23:45:07.614 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mon to start... 2026-03-05T23:45:07.614 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mon... 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout cluster: 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout id: e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout health: HEALTH_OK 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout services: 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum vm02 (age 0.304349s) 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mgr: no daemons active 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout data: 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout pgs: 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:mon is available 2026-03-05T23:45:08.270 INFO:teuthology.orchestra.run.vm02.stdout:Assimilating anything we can from ceph.conf... 
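Two details of the mon creation above are worth noting before the config assimilation output that follows: the non-zero exit from systemctl reset-failed is expected, since the unit was only just created and had never been loaded, so there is no failed state to clear; and the "Waiting for mon" step is simply a poll of ceph status until the monitor answers. An equivalent manual check on the node would be (unit name and cephadm path are the ones appearing in this log):

    sudo systemctl is-active ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm02.service
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph -s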
2026-03-05T23:45:08.602 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:08.603 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [global] 2026-03-05T23:45:08.603 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout fsid = e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.102:3300,v1:192.168.123.102:6789] 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [osd] 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-05T23:45:08.604 INFO:teuthology.orchestra.run.vm02.stdout:Generating new minimal ceph.conf... 2026-03-05T23:45:09.136 INFO:teuthology.orchestra.run.vm02.stdout:Restarting the monitor... 2026-03-05T23:45:09.645 INFO:teuthology.orchestra.run.vm02.stdout:Setting public_network to 192.168.123.0/24 in mon config section 2026-03-05T23:45:10.038 INFO:teuthology.orchestra.run.vm02.stdout:Wrote config to /etc/ceph/ceph.conf 2026-03-05T23:45:10.038 INFO:teuthology.orchestra.run.vm02.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-05T23:45:10.038 INFO:teuthology.orchestra.run.vm02.stdout:Creating mgr... 2026-03-05T23:45:10.039 INFO:teuthology.orchestra.run.vm02.stdout:Verifying port 0.0.0.0:9283 ... 2026-03-05T23:45:10.039 INFO:teuthology.orchestra.run.vm02.stdout:Verifying port 0.0.0.0:8765 ... 2026-03-05T23:45:10.040 INFO:teuthology.orchestra.run.vm02.stdout:Verifying port 0.0.0.0:8443 ... 2026-03-05T23:45:10.248 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mgr.vm02.trdlkm 2026-03-05T23:45:10.248 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to reset failed state of unit ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mgr.vm02.trdlkm.service: Unit ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mgr.vm02.trdlkm.service not loaded. 2026-03-05T23:45:10.434 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-e1ad3122-18e4-11f1-9926-f7644c158a97.target.wants/ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mgr.vm02.trdlkm.service → /etc/systemd/system/ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@.service. 
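The [global]/[mgr]/[osd] dump above is the output of the assimilation step: cephadm feeds the existing ceph.conf into the monitors' central config database and then writes back a new minimal ceph.conf containing little more than the fsid and mon_host. The same operations are available as plain commands, e.g.:

    # push local ceph.conf options into the central config store
    ceph config assimilate-conf -i /etc/ceph/ceph.conf
    # regenerate the stripped-down client config and spot-check a value
    ceph config generate-minimal-conf
    ceph config get mon public_network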
2026-03-05T23:45:10.641 INFO:teuthology.orchestra.run.vm02.stdout:firewalld does not appear to be present 2026-03-05T23:45:10.641 INFO:teuthology.orchestra.run.vm02.stdout:Not possible to enable service . firewalld.service is not available 2026-03-05T23:45:10.641 INFO:teuthology.orchestra.run.vm02.stdout:firewalld does not appear to be present 2026-03-05T23:45:10.641 INFO:teuthology.orchestra.run.vm02.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available 2026-03-05T23:45:10.641 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mgr to start... 2026-03-05T23:45:10.641 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mgr... 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsid": "e1ad3122-18e4-11f1-9926-f7644c158a97", 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "health": { 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 0 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "vm02" 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_age": 1, 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:11.055 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-05T23:45:11.056 
INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "btime": "2026-03-05T22:45:07:639454+0000", 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "restful" 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modified": "2026-03-05T22:45:07.640131+0000", 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: 
stdout } 2026-03-05T23:45:11.056 INFO:teuthology.orchestra.run.vm02.stdout:mgr not available, waiting (1/15)... 2026-03-05T23:45:13.435 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:13.435 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:45:13.435 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsid": "e1ad3122-18e4-11f1-9926-f7644c158a97", 2026-03-05T23:45:13.435 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "health": { 2026-03-05T23:45:13.435 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-05T23:45:13.435 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 0 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "vm02" 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_age": 3, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: 
stdout "num_pools": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "btime": "2026-03-05T22:45:07:639454+0000", 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "restful" 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modified": "2026-03-05T22:45:07.640131+0000", 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-05T23:45:13.436 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-05T23:45:13.437 INFO:teuthology.orchestra.run.vm02.stdout:mgr not available, waiting (2/15)... 
2026-03-05T23:45:15.815 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:15.815 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsid": "e1ad3122-18e4-11f1-9926-f7644c158a97", 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "health": { 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 0 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "vm02" 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_age": 6, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 
"num_objects": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-05T23:45:15.816 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "btime": "2026-03-05T22:45:07:639454+0000", 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "restful" 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modified": "2026-03-05T22:45:07.640131+0000", 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-05T23:45:15.817 INFO:teuthology.orchestra.run.vm02.stdout:mgr not available, waiting (3/15)... 
2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsid": "e1ad3122-18e4-11f1-9926-f7644c158a97", 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "health": { 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 0 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "vm02" 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_age": 8, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-05T23:45:18.213 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 
"num_objects": 0, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "btime": "2026-03-05T22:45:07:639454+0000", 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "restful" 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modified": "2026-03-05T22:45:07.640131+0000", 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-05T23:45:18.214 INFO:teuthology.orchestra.run.vm02.stdout:mgr not available, waiting (4/15)... 
2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsid": "e1ad3122-18e4-11f1-9926-f7644c158a97", 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "health": { 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 0 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-05T23:45:20.668 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "vm02" 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_age": 10, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 
"num_objects": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "btime": "2026-03-05T22:45:07:639454+0000", 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "restful" 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modified": "2026-03-05T22:45:07.640131+0000", 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-05T23:45:20.669 INFO:teuthology.orchestra.run.vm02.stdout:mgr not available, waiting (5/15)... 
2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsid": "e1ad3122-18e4-11f1-9926-f7644c158a97", 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "health": { 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 0 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "vm02" 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_age": 13, 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-05T23:45:22.976 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 
"num_objects": 0, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "btime": "2026-03-05T22:45:07:639454+0000", 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "restful" 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modified": "2026-03-05T22:45:07.640131+0000", 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-05T23:45:22.977 INFO:teuthology.orchestra.run.vm02.stdout:mgr not available, waiting (6/15)... 
2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsid": "e1ad3122-18e4-11f1-9926-f7644c158a97", 2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "health": { 2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-05T23:45:25.422 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 0 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "vm02" 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_age": 15, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 
"num_objects": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "btime": "2026-03-05T22:45:07:639454+0000", 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-05T23:45:25.423 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "restful" 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modified": "2026-03-05T22:45:07.640131+0000", 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-05T23:45:25.424 INFO:teuthology.orchestra.run.vm02.stdout:mgr is available 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [global] 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout fsid = e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.102:3300,v1:192.168.123.102:6789] 
2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-05T23:45:25.845 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [osd] 2026-03-05T23:45:25.846 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-05T23:45:25.846 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-05T23:45:25.846 INFO:teuthology.orchestra.run.vm02.stdout:Enabling cephadm module... 2026-03-05T23:45:27.134 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:45:27.134 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 5, 2026-03-05T23:45:27.134 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-05T23:45:27.134 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "active_name": "vm02.trdlkm", 2026-03-05T23:45:27.134 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-05T23:45:27.134 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-05T23:45:27.134 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for the mgr to restart... 2026-03-05T23:45:27.134 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mgr epoch 5... 2026-03-05T23:45:39.508 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:45:39.508 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7, 2026-03-05T23:45:39.508 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-05T23:45:39.508 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-05T23:45:39.508 INFO:teuthology.orchestra.run.vm02.stdout:mgr epoch 5 is available 2026-03-05T23:45:39.508 INFO:teuthology.orchestra.run.vm02.stdout:Setting orchestrator backend to cephadm... 2026-03-05T23:45:40.452 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout value unchanged 2026-03-05T23:45:40.452 INFO:teuthology.orchestra.run.vm02.stdout:Generating ssh key... 
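The three steps logged here, enabling the cephadm mgr module, pointing the orchestrator at it, and generating the cluster SSH key whose public half appears below, correspond to ordinary CLI commands that can also be run by hand on an existing cluster:

    ceph mgr module enable cephadm
    ceph orch set backend cephadm
    ceph cephadm generate-key            # create the cluster SSH key pair
    ceph cephadm get-pub-key > ceph.pub  # export the public key for distribution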
2026-03-05T23:45:41.380 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeKAwOxJ30VK62aU4BtwWBkHxxQgEEWRgm6V20uniYpcbHjylkkoXs70x0CXte3EENbUQhotIEf1wGn+RbKhkadCy584/ZC+PRlAW6mvLmQZesUeECqVHDmB5HFgdSNd4hKCjmgWAFdbB42hTH/9wCfsQAPdQkLo/FWQqMtmqPY7N384dTSOLuGV8b/JjhnFameimFaDCm/K+KuFT8m07bKgruuUEkvHRlILS358BYW7g8FgNtZQy6at3i7cO2MTmCsPmB/i7swUPf6jc10Ks3li1D2n0gEfxCwJGrl0jok7pq45bPxAc/fP5QRECs+zb815DI0Pbafzz2INKRwn5Zo8pJpAVj+n7OkhKJti0ihDY48DEQjitR0EFDO8cOnMb6tKEi2O3zskChdMYGzsglqBWoE5E9qGlzTFQnoJvY0VDW1V3SKEJzfCy5FpM9kFT5dUy7hb3Pg1jI1TdnaF4UoyWakrX132GLYam+TtKwW38snM0gUjyEs+sZT/1knrc= ceph-e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:45:41.380 INFO:teuthology.orchestra.run.vm02.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-05T23:45:41.380 INFO:teuthology.orchestra.run.vm02.stdout:Adding key to root@localhost authorized_keys... 2026-03-05T23:45:41.380 INFO:teuthology.orchestra.run.vm02.stdout:Adding host vm02... 2026-03-05T23:45:44.371 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Added host 'vm02' with addr '192.168.123.102' 2026-03-05T23:45:44.371 INFO:teuthology.orchestra.run.vm02.stdout:Deploying mon service with default placement... 2026-03-05T23:45:44.971 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled mon update... 2026-03-05T23:45:44.971 INFO:teuthology.orchestra.run.vm02.stdout:Deploying mgr service with default placement... 2026-03-05T23:45:45.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled mgr update... 2026-03-05T23:45:45.401 INFO:teuthology.orchestra.run.vm02.stdout:Deploying crash service with default placement... 2026-03-05T23:45:45.888 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled crash update... 2026-03-05T23:45:45.888 INFO:teuthology.orchestra.run.vm02.stdout:Deploying ceph-exporter service with default placement... 2026-03-05T23:45:46.539 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update... 2026-03-05T23:45:46.539 INFO:teuthology.orchestra.run.vm02.stdout:Deploying prometheus service with default placement... 2026-03-05T23:45:47.077 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled prometheus update... 2026-03-05T23:45:47.077 INFO:teuthology.orchestra.run.vm02.stdout:Deploying grafana service with default placement... 2026-03-05T23:45:47.565 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled grafana update... 2026-03-05T23:45:47.565 INFO:teuthology.orchestra.run.vm02.stdout:Deploying node-exporter service with default placement... 2026-03-05T23:45:48.156 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update... 2026-03-05T23:45:48.156 INFO:teuthology.orchestra.run.vm02.stdout:Deploying alertmanager service with default placement... 2026-03-05T23:45:48.757 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update... 2026-03-05T23:45:49.930 INFO:teuthology.orchestra.run.vm02.stdout:Enabling the dashboard module... 
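Registering the bootstrap host and scheduling the default services maps directly onto ceph orch commands. For any additional host the pattern would look like the following (hostname and address are placeholders; the public key path is the one written above):

    # install the cluster SSH key on the new host, then register it
    ssh-copy-id -f -i /home/ubuntu/cephtest/ceph.pub root@<new-host>
    ceph orch host add <new-host> <addr>
    # list the services scheduled above and their placement
    ceph orch ls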
2026-03-05T23:45:51.538 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:45:51.538 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 9, 2026-03-05T23:45:51.538 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-05T23:45:51.538 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "active_name": "vm02.trdlkm", 2026-03-05T23:45:51.538 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-05T23:45:51.538 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-05T23:45:51.538 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for the mgr to restart... 2026-03-05T23:45:51.538 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mgr epoch 9... 2026-03-05T23:46:02.664 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout { 2026-03-05T23:46:02.664 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11, 2026-03-05T23:46:02.664 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-05T23:46:02.664 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-05T23:46:02.664 INFO:teuthology.orchestra.run.vm02.stdout:mgr epoch 9 is available 2026-03-05T23:46:02.664 INFO:teuthology.orchestra.run.vm02.stdout:Generating a dashboard self-signed certificate... 2026-03-05T23:46:03.452 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Self-signed certificate created 2026-03-05T23:46:03.453 INFO:teuthology.orchestra.run.vm02.stdout:Creating initial admin user... 2026-03-05T23:46:04.373 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$0oMhYzbtSMV7eqdTNBT67eWdQallI1o5ZXG70B7jI66kKtcZ3uZwy", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1772750764, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-05T23:46:04.374 INFO:teuthology.orchestra.run.vm02.stdout:Fetching dashboard port number... 2026-03-05T23:46:04.795 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 8443 2026-03-05T23:46:04.795 INFO:teuthology.orchestra.run.vm02.stdout:firewalld does not appear to be present 2026-03-05T23:46:04.795 INFO:teuthology.orchestra.run.vm02.stdout:Not possible to open ports <[8443]>. 
firewalld.service is not available 2026-03-05T23:46:04.797 INFO:teuthology.orchestra.run.vm02.stdout:Ceph Dashboard is now available at: 2026-03-05T23:46:04.797 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:04.797 INFO:teuthology.orchestra.run.vm02.stdout: URL: https://vm02.local:8443/ 2026-03-05T23:46:04.797 INFO:teuthology.orchestra.run.vm02.stdout: User: admin 2026-03-05T23:46:04.798 INFO:teuthology.orchestra.run.vm02.stdout: Password: ub01jwkuct 2026-03-05T23:46:04.798 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:04.798 INFO:teuthology.orchestra.run.vm02.stdout:Saving cluster configuration to /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config directory 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout:Or, if you are only running a single cluster on this host: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: sudo /home/ubuntu/cephtest/cephadm shell 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout:Please consider enabling telemetry to help improve Ceph: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: ceph telemetry on 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout:For more information see: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/ 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:05.236 INFO:teuthology.orchestra.run.vm02.stdout:Bootstrap complete. 2026-03-05T23:46:05.300 INFO:tasks.cephadm:Fetching config... 2026-03-05T23:46:05.300 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-05T23:46:05.300 DEBUG:teuthology.orchestra.run.vm02:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-05T23:46:05.365 INFO:tasks.cephadm:Fetching client.admin keyring... 2026-03-05T23:46:05.365 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-05T23:46:05.365 DEBUG:teuthology.orchestra.run.vm02:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-05T23:46:05.395 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-05T23:46:05.395 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-05T23:46:05.395 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/keyring of=/dev/stdout 2026-03-05T23:46:05.496 INFO:tasks.cephadm:Fetching pub ssh key... 
2026-03-05T23:46:05.496 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-05T23:46:05.496 DEBUG:teuthology.orchestra.run.vm02:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-05T23:46:05.565 INFO:tasks.cephadm:Installing pub ssh key for root users... 2026-03-05T23:46:05.565 DEBUG:teuthology.orchestra.run.vm02:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeKAwOxJ30VK62aU4BtwWBkHxxQgEEWRgm6V20uniYpcbHjylkkoXs70x0CXte3EENbUQhotIEf1wGn+RbKhkadCy584/ZC+PRlAW6mvLmQZesUeECqVHDmB5HFgdSNd4hKCjmgWAFdbB42hTH/9wCfsQAPdQkLo/FWQqMtmqPY7N384dTSOLuGV8b/JjhnFameimFaDCm/K+KuFT8m07bKgruuUEkvHRlILS358BYW7g8FgNtZQy6at3i7cO2MTmCsPmB/i7swUPf6jc10Ks3li1D2n0gEfxCwJGrl0jok7pq45bPxAc/fP5QRECs+zb815DI0Pbafzz2INKRwn5Zo8pJpAVj+n7OkhKJti0ihDY48DEQjitR0EFDO8cOnMb6tKEi2O3zskChdMYGzsglqBWoE5E9qGlzTFQnoJvY0VDW1V3SKEJzfCy5FpM9kFT5dUy7hb3Pg1jI1TdnaF4UoyWakrX132GLYam+TtKwW38snM0gUjyEs+sZT/1knrc= ceph-e1ad3122-18e4-11f1-9926-f7644c158a97' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-05T23:46:05.667 INFO:teuthology.orchestra.run.vm02.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeKAwOxJ30VK62aU4BtwWBkHxxQgEEWRgm6V20uniYpcbHjylkkoXs70x0CXte3EENbUQhotIEf1wGn+RbKhkadCy584/ZC+PRlAW6mvLmQZesUeECqVHDmB5HFgdSNd4hKCjmgWAFdbB42hTH/9wCfsQAPdQkLo/FWQqMtmqPY7N384dTSOLuGV8b/JjhnFameimFaDCm/K+KuFT8m07bKgruuUEkvHRlILS358BYW7g8FgNtZQy6at3i7cO2MTmCsPmB/i7swUPf6jc10Ks3li1D2n0gEfxCwJGrl0jok7pq45bPxAc/fP5QRECs+zb815DI0Pbafzz2INKRwn5Zo8pJpAVj+n7OkhKJti0ihDY48DEQjitR0EFDO8cOnMb6tKEi2O3zskChdMYGzsglqBWoE5E9qGlzTFQnoJvY0VDW1V3SKEJzfCy5FpM9kFT5dUy7hb3Pg1jI1TdnaF4UoyWakrX132GLYam+TtKwW38snM0gUjyEs+sZT/1knrc= ceph-e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:46:05.690 DEBUG:teuthology.orchestra.run.vm09:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeKAwOxJ30VK62aU4BtwWBkHxxQgEEWRgm6V20uniYpcbHjylkkoXs70x0CXte3EENbUQhotIEf1wGn+RbKhkadCy584/ZC+PRlAW6mvLmQZesUeECqVHDmB5HFgdSNd4hKCjmgWAFdbB42hTH/9wCfsQAPdQkLo/FWQqMtmqPY7N384dTSOLuGV8b/JjhnFameimFaDCm/K+KuFT8m07bKgruuUEkvHRlILS358BYW7g8FgNtZQy6at3i7cO2MTmCsPmB/i7swUPf6jc10Ks3li1D2n0gEfxCwJGrl0jok7pq45bPxAc/fP5QRECs+zb815DI0Pbafzz2INKRwn5Zo8pJpAVj+n7OkhKJti0ihDY48DEQjitR0EFDO8cOnMb6tKEi2O3zskChdMYGzsglqBWoE5E9qGlzTFQnoJvY0VDW1V3SKEJzfCy5FpM9kFT5dUy7hb3Pg1jI1TdnaF4UoyWakrX132GLYam+TtKwW38snM0gUjyEs+sZT/1knrc= ceph-e1ad3122-18e4-11f1-9926-f7644c158a97' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-05T23:46:05.722 INFO:teuthology.orchestra.run.vm09.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeKAwOxJ30VK62aU4BtwWBkHxxQgEEWRgm6V20uniYpcbHjylkkoXs70x0CXte3EENbUQhotIEf1wGn+RbKhkadCy584/ZC+PRlAW6mvLmQZesUeECqVHDmB5HFgdSNd4hKCjmgWAFdbB42hTH/9wCfsQAPdQkLo/FWQqMtmqPY7N384dTSOLuGV8b/JjhnFameimFaDCm/K+KuFT8m07bKgruuUEkvHRlILS358BYW7g8FgNtZQy6at3i7cO2MTmCsPmB/i7swUPf6jc10Ks3li1D2n0gEfxCwJGrl0jok7pq45bPxAc/fP5QRECs+zb815DI0Pbafzz2INKRwn5Zo8pJpAVj+n7OkhKJti0ihDY48DEQjitR0EFDO8cOnMb6tKEi2O3zskChdMYGzsglqBWoE5E9qGlzTFQnoJvY0VDW1V3SKEJzfCy5FpM9kFT5dUy7hb3Pg1jI1TdnaF4UoyWakrX132GLYam+TtKwW38snM0gUjyEs+sZT/1knrc= ceph-e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:46:05.730 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-05T23:46:06.304 
INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:46:07.041 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-05T23:46:07.041 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-05T23:46:07.597 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:46:08.248 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm09 2026-03-05T23:46:08.248 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-05T23:46:08.248 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.conf 2026-03-05T23:46:08.265 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-05T23:46:08.265 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-05T23:46:08.322 INFO:tasks.cephadm:Adding host vm09 to orchestrator... 2026-03-05T23:46:08.322 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph orch host add vm09 2026-03-05T23:46:08.877 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:46:12.910 INFO:teuthology.orchestra.run.vm02.stdout:Added host 'vm09' with addr '192.168.123.109' 2026-03-05T23:46:13.004 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph orch host ls --format=json 2026-03-05T23:46:13.537 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:46:14.108 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:46:14.108 INFO:teuthology.orchestra.run.vm02.stdout:[{"addr": "192.168.123.102", "hostname": "vm02", "labels": [], "status": ""}, {"addr": "192.168.123.109", "hostname": "vm09", "labels": [], "status": ""}] 2026-03-05T23:46:14.188 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-05T23:46:14.188 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd crush tunables default 2026-03-05T23:46:14.977 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:46:16.415 INFO:teuthology.orchestra.run.vm02.stderr:adjusted tunables profile to default 2026-03-05T23:46:16.513 INFO:tasks.cephadm:Adding mon.vm02 on vm02 2026-03-05T23:46:16.513 INFO:tasks.cephadm:Adding mon.vm09 on vm09 2026-03-05T23:46:16.514 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image 
harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph orch apply mon '2;vm02:192.168.123.102=vm02;vm09:192.168.123.109=vm09' 2026-03-05T23:46:16.870 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:16.910 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:17.280 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled mon update... 2026-03-05T23:46:17.361 DEBUG:teuthology.orchestra.run.vm09:mon.vm09> sudo journalctl -f -n 0 -u ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm09.service 2026-03-05T23:46:17.363 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-05T23:46:17.363 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:17.752 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:17.799 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:18.176 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:18.176 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:18.176 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:19.257 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
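[Editor's sketch] The cephadm task above has just applied the two-monitor placement ('2;vm02:192.168.123.102=vm02;vm09:192.168.123.109=vm09') and is now polling "ceph mon dump -f json" through the cephadm shell until the monmap's "mons" array lists both monitors; the repeated "Waiting for 2 mons in monmap..." entries that follow are iterations of that loop. Below is a minimal, hypothetical Python sketch of an equivalent wait, not harness code; the cephadm path, container image, and fsid are copied from the commands in this log.

# Hypothetical sketch of the wait loop, under the assumptions stated above.
import json
import subprocess
import time

CEPHADM = "/home/ubuntu/cephtest/cephadm"
IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3"
FSID = "e1ad3122-18e4-11f1-9926-f7644c158a97"

def mon_count():
    # Same invocation seen in the log: cephadm shell wrapping "ceph mon dump -f json".
    out = subprocess.check_output([
        "sudo", CEPHADM, "--image", IMAGE, "shell",
        "-c", "/etc/ceph/ceph.conf",
        "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--fsid", FSID, "--",
        "ceph", "mon", "dump", "-f", "json",
    ])
    # The monmap JSON dumped above has a "mons" array; count its entries.
    return len(json.loads(out)["mons"])

while mon_count() < 2:
    time.sleep(2)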
2026-03-05T23:46:19.258 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:19.581 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:19.620 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:20.012 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:20.012 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:20.012 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:21.078 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-05T23:46:21.078 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:21.425 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:21.465 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:21.834 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:21.834 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:21.834 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:22.910 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:22.910 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:23.228 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:23.268 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:23.627 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:23.627 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:23.627 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:24.682 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-05T23:46:24.682 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:25.003 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:25.038 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:25.390 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:25.390 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:25.390 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:26.468 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:26.468 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:26.796 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:26.836 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:27.205 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:27.206 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:27.206 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:28.289 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-05T23:46:28.290 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:28.610 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:28.646 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:29.004 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:29.005 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:29.005 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:30.081 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:30.082 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:30.425 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:30.465 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:30.852 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:30.852 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:30.853 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:31.909 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-05T23:46:31.909 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:32.237 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:32.277 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:32.650 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:32.650 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:32.650 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:33.743 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:33.743 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:34.074 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:34.116 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:34.495 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:34.495 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:34.495 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:35.565 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-05T23:46:35.565 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:35.896 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:35.933 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:36.344 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:36.345 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:36.345 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:37.410 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:37.410 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:37.742 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:37.786 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:38.157 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:38.157 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:38.157 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:39.245 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-05T23:46:39.245 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:39.584 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:39.617 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:39.999 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:39.999 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:39.999 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:39 vm02 ceph-mon[65842]: mgrmap e14: vm02.trdlkm(active, since 37s) 2026-03-05T23:46:41.079 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:41.079 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:41.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:40 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/2428174933' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:46:41.417 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:41.457 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:41.838 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:41.838 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:41.838 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:42.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:41 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/4116683933' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:46:42.917 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:42.917 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:43.261 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:43.295 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:43.668 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:43.668 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:43.668 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:44.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:43 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/3764951101' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:46:44.756 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-05T23:46:44.756 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:45.114 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:45.157 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:45.520 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:45.520 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:45.520 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:45.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:45 vm02 ceph-mon[65842]: from='client.? 
192.168.123.109:0/106849145' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:46:46.568 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-05T23:46:46.568 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:46.952 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:46.994 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:47.370 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:47.370 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:47.370 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:47.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:47 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/893775357' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:46:48.443 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:48.443 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:48.769 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:48.804 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:49.193 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:49.193 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:49.194 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:49.602 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:49 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/2486482902' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:46:50.264 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:50.264 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: Active manager daemon vm02.trdlkm restarted 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: Activating manager daemon vm02.trdlkm 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: osdmap e5: 0 total, 0 up, 0 in 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: mgrmap e15: vm02.trdlkm(active, starting, since 0.0039973s) 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr metadata", "who": "vm02.trdlkm", "id": "vm02.trdlkm"}]: dispatch 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: Manager daemon vm02.trdlkm is now available 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.trdlkm/mirror_snapshot_schedule"}]: dispatch 2026-03-05T23:46:50.579 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:50 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.trdlkm/trash_purge_schedule"}]: dispatch 2026-03-05T23:46:50.622 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:50.661 
INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:51.067 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:51.068 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:51.068 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:51.420 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:51 vm02 ceph-mon[65842]: mgrmap e16: vm02.trdlkm(active, since 1.0115s) 2026-03-05T23:46:51.420 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:51 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:51.420 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:51 vm02 ceph-mon[65842]: [05/Mar/2026:22:46:50] ENGINE Bus STARTING 2026-03-05T23:46:51.420 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:51 vm02 ceph-mon[65842]: [05/Mar/2026:22:46:50] ENGINE Serving on https://192.168.123.102:7150 2026-03-05T23:46:51.420 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:51 vm02 ceph-mon[65842]: [05/Mar/2026:22:46:50] ENGINE Client ('192.168.123.102', 34450) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-05T23:46:51.420 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:51 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:51.420 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:51 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/2334336043' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:46:51.420 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:51 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:51.420 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:51 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:52.141 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:52.141 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:52.477 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:52.525 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:52.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:52 vm02 ceph-mon[65842]: [05/Mar/2026:22:46:50] ENGINE Serving on http://192.168.123.102:8765 2026-03-05T23:46:52.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:52 vm02 ceph-mon[65842]: [05/Mar/2026:22:46:50] ENGINE Bus STARTED 2026-03-05T23:46:52.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:52 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:52.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:52 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:52.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:52 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-05T23:46:52.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:52 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:53.118 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:53.118 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:53.118 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:54.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:53 vm02 ceph-mon[65842]: mgrmap e17: vm02.trdlkm(active, since 3s) 2026-03-05T23:46:54.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:53 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/3864879144' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:46:54.300 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:54.300 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:54.659 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:54.713 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-05T23:46:55.105 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:55.105 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:55.105 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:55.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:55 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/2161265915' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:46:56.183 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:56.184 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:56.585 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:46:56.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:56.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:56.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:56.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:56.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-05T23:46:56.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:46:56.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: Updating vm02:/etc/ceph/ceph.conf 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: Updating vm09:/etc/ceph/ceph.conf 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: Updating vm02:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: Updating vm09:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: Updating vm02:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.client.admin.keyring 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: Updating vm09:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.client.admin.keyring 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: 
from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:46:56.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:56 vm02 ceph-mon[65842]: Deploying daemon ceph-exporter.vm09 on vm09 2026-03-05T23:46:57.085 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:57.085 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:57.085 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:46:57.646 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:57 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/2938634719' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:46:58.307 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:46:58.308 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:46:58.836 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:46:58.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:58 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:58.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:58 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:58.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:58 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:58.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:58 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:46:58.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:58 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-05T23:46:58.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:58 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-05T23:46:58.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:58 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:46:58.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:58 vm02 ceph-mon[65842]: Deploying daemon crash.vm09 on vm09 2026-03-05T23:46:59.357 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:46:59.358 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:46:59.358 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:47:00.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:59 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:00.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:59 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:00.191 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:59 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:00.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:59 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:00.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:59 vm02 ceph-mon[65842]: Deploying daemon node-exporter.vm09 on vm09 2026-03-05T23:47:00.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:59 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/3744668713' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:00.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:46:59 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:00.495 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-05T23:47:00.495 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:47:00.816 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:47:01.183 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:47:01.183 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:47:01.183 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:47:01.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:01 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/2314350066' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:02.346 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-05T23:47:02.346 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:47:02.771 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:47:03.259 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:47:03.259 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:45:06.277706Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-05T23:47:03.259 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-05T23:47:03.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:02 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:03.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:02 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:03.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:02 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:03.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:02 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:03.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:02 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm09.fivqds", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-05T23:47:03.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:02 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm09.fivqds", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-05T23:47:03.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:02 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-05T23:47:03.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:02 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:03.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:02 vm02 ceph-mon[65842]: Deploying daemon mgr.vm09.fivqds on vm09 2026-03-05T23:47:04.402 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
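In the entries above the active mgr dispatches an auth get-or-create for the new standby manager before deploying mgr.vm09.fivqds on vm09. The equivalent CLI form of that request, reconstructed here only for readability (the test issues it through the orchestrator, not by hand), would be:

    # Illustrative equivalent of the mon command dispatched above; not run by the test
    ceph auth get-or-create mgr.vm09.fivqds mon 'profile mgr' osd 'allow *' mds 'allow *'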
2026-03-05T23:47:04.403 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph mon dump -f json 2026-03-05T23:47:04.432 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 systemd[1]: Starting Ceph mon.vm09 for e1ad3122-18e4-11f1-9926-f7644c158a97... 2026-03-05T23:47:04.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:04 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:04 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:04 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:04 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:04 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-05T23:47:04.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:04 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:04.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:04 vm02 ceph-mon[65842]: Deploying daemon mon.vm09 on vm09 2026-03-05T23:47:04.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:04 vm02 ceph-mon[65842]: from='client.? 
192.168.123.109:0/1708288707' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.802 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:47:04.844 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 podman[70708]: 2026-03-05 23:47:04.456575417 +0100 CET m=+0.020708795 container create 1b91af5f8dfd6eaea3878697e2f1ae95100c47b8c8923d9f207da475a9d2834e (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b, name=ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm09, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-47-gc24117fd552, CEPH_SHA1=c24117fd5525679b799527bc1bd1f1dd0a2db5e2) 2026-03-05T23:47:04.844 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 podman[70708]: 2026-03-05 23:47:04.531799004 +0100 CET m=+0.095932382 container init 1b91af5f8dfd6eaea3878697e2f1ae95100c47b8c8923d9f207da475a9d2834e (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b, name=ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm09, CEPH_REF=19.2.3-47-gc24117fd552, CEPH_SHA1=c24117fd5525679b799527bc1bd1f1dd0a2db5e2, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-05T23:47:04.844 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 podman[70708]: 2026-03-05 23:47:04.545637485 +0100 CET m=+0.109770863 container start 1b91af5f8dfd6eaea3878697e2f1ae95100c47b8c8923d9f207da475a9d2834e (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b, name=ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm09, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-47-gc24117fd552, CEPH_SHA1=c24117fd5525679b799527bc1bd1f1dd0a2db5e2, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-05T23:47:04.844 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 podman[70708]: 2026-03-05 23:47:04.450696895 +0100 CET m=+0.014830273 image pull 306e97de47e91c2b4b24d3dc09be3b3a12039b078f343d91220102acc6628a68 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b 2026-03-05T23:47:04.844 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 bash[70708]: 1b91af5f8dfd6eaea3878697e2f1ae95100c47b8c8923d9f207da475a9d2834e 2026-03-05T23:47:04.844 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: set uid:gid to 167:167 (ceph:ceph) 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: ceph version 19.2.3-47-gc24117fd552 (c24117fd5525679b799527bc1bd1f1dd0a2db5e2) squid (stable), process ceph-mon, pid 2 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: pidfile_write: ignore empty --pid-file 2026-03-05T23:47:04.845 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: load: jerasure load: lrc 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: RocksDB version: 7.9.2 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Git sha 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Compile date 2026-03-03 21:08:28 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: DB SUMMARY 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: DB Session ID: LPYSHUXBXI86HJ3W68PX 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: CURRENT file: CURRENT 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: IDENTITY file: IDENTITY 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: MANIFEST file: MANIFEST-000005 size: 59 Bytes 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm09/store.db dir, Total Num: 0, files: 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm09/store.db: 000004.log size: 511 ; 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.error_if_exists: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.create_if_missing: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.paranoid_checks: 1 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.env: 0x55923869aca0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.fs: PosixFileSystem 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.info_log: 0x559239dfc660 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_file_opening_threads: 16 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.statistics: (nil) 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.use_fsync: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_log_file_size: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 
05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.keep_log_file_num: 1000 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.recycle_log_file_num: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.allow_fallocate: 1 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.allow_mmap_reads: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.allow_mmap_writes: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.use_direct_reads: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.create_missing_column_families: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.db_log_dir: 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.wal_dir: 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.advise_random_on_open: 1 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.db_write_buffer_size: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.write_buffer_manager: 0x559239e01900 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 
vm09 ceph-mon[70730]: rocksdb: Options.rate_limiter: (nil) 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.wal_recovery_mode: 2 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.enable_thread_tracking: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.enable_pipelined_write: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.unordered_write: 0 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-05T23:47:04.845 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.row_cache: None 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.wal_filter: None 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.allow_ingest_behind: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.two_write_queues: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.manual_wal_flush: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.wal_compression: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.atomic_flush: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.log_readahead_size: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.best_efforts_recovery: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: 
Options.max_bgerror_resume_count: 2147483647 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.allow_data_in_errors: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.db_host_id: __hostname__ 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_background_jobs: 2 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_background_compactions: -1 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_subcompactions: 1 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_total_wal_size: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_open_files: -1 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bytes_per_sync: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_readahead_size: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_background_flushes: -1 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Compression algorithms supported: 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: kZSTD supported: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 
ceph-mon[70730]: rocksdb: kXpressCompression supported: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: kBZip2Compression supported: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: kLZ4Compression supported: 1 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: kZlibCompression supported: 1 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: kLZ4HCCompression supported: 1 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: kSnappyCompression supported: 1 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm09/store.db/MANIFEST-000005 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.merge_operator: 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_filter: None 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_filter_factory: None 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.sst_partitioner_factory: None 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559239dfc560) 2026-03-05T23:47:04.846 INFO:journalctl@ceph.mon.vm09.vm09.stdout: cache_index_and_filter_blocks: 1 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: pin_top_level_index_and_filter: 1 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: index_type: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: data_block_index_type: 0 2026-03-05T23:47:04.847 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: index_shortening: 1 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: checksum: 4 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: no_block_cache: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: block_cache: 0x559239e211f0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: block_cache_name: BinnedLRUCache 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: block_cache_options: 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: capacity : 536870912 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: num_shard_bits : 4 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: strict_capacity_limit : 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: high_pri_pool_ratio: 0.000 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: block_cache_compressed: (nil) 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: persistent_cache: (nil) 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: block_size: 4096 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: block_size_deviation: 10 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: block_restart_interval: 16 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: index_block_restart_interval: 1 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: metadata_block_size: 4096 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: partition_filters: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: use_delta_encoding: 1 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: filter_policy: bloomfilter 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: whole_key_filtering: 1 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: verify_compression: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: read_amp_bytes_per_bit: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: format_version: 5 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: enable_index_compression: 1 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: block_align: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: max_auto_readahead_size: 262144 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: prepopulate_block_cache: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: initial_auto_readahead_size: 8192 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout: num_file_reads_for_auto_readahead: 2 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.write_buffer_size: 33554432 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_write_buffer_number: 2 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compression: NoCompression 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bottommost_compression: Disabled 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: 
Options.prefix_extractor: nullptr 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.num_levels: 7 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compression_opts.level: 32767 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compression_opts.strategy: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: 
Options.compression_opts.enabled: false 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-05T23:47:04.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.target_file_size_base: 67108864 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.arena_block_size: 1048576 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: 
Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.disable_auto_compactions: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.inplace_update_support: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.bloom_locality: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.max_successive_merges: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.paranoid_file_checks: 0 
2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.force_consistency_checks: 1 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.report_bg_io_stats: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.ttl: 2592000 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.enable_blob_files: false 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.min_blob_size: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.blob_file_size: 268435456 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.blob_file_starting_level: 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 systemd[1]: Started Ceph mon.vm09 for e1ad3122-18e4-11f1-9926-f7644c158a97. 
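At this point systemd reports the mon.vm09 unit as started, and the entries that follow show the monitor recovering its RocksDB store and noting that it is not yet in the monmap. A quick way to confirm the new daemon from the admin node would be an orchestrator query like the one below; the grep filter is an illustrative assumption and this check is not part of the recorded test.

    # Sketch only: list mon daemons known to the orchestrator
    sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph orch ps | grep '^mon\.'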
2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm09/store.db/MANIFEST-000005 succeeded,manifest_file_number is 5, next_file_number is 7, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: de1fb2af-6655-4571-9b76-c85cc4c1e7dc 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772750824585924, "job": 1, "event": "recovery_started", "wal_files": [4]} 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #4 mode 2 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772750824586735, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1643, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 1, "largest_seqno": 5, "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1772750824, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "de1fb2af-6655-4571-9b76-c85cc4c1e7dc", "db_session_id": "LPYSHUXBXI86HJ3W68PX", "orig_file_number": 8, "seqno_to_time_mapping": "N/A"}} 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772750824586799, "job": 1, "event": "recovery_finished"} 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [db/version_set.cc:5047] Creating manifest 10 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm09/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 
0x559239e22e00 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: DB pointer 0x559239f3e000 2026-03-05T23:47:04.848 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09 does not exist in monmap, will attempt to join an existing cluster 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: using public_addr v2:192.168.123.109:0/0 -> [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ** DB Stats ** 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ** Compaction Stats [default] ** 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: L0 1/0 1.60 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.0 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Sum 1/0 1.60 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.0 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.0 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ** Compaction Stats [default] ** 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.0 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Cumulative compaction: 0.00 GB write, 0.06 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-05T23:47:04.849 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Interval compaction: 0.00 GB write, 0.06 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Block cache BinnedLRUCache@0x559239e211f0#2 capacity: 512.00 MB usage: 0.86 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 9e-06 secs_since: 0 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Block cache entry stats(count,size,portion): DataBlock(1,0.64 KB,0.00012219%) FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.11 KB,2.08616e-05%) Misc(1,0.00 KB,0%) 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout: 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: starting mon.vm09 rank -1 at public addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] at bind addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon_data /var/lib/ceph/mon/ceph-vm09 fsid e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(???) e0 preinit fsid e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 
192.168.123.109:0/2021852290' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.854 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/1393108016' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Deploying daemon prometheus.vm02 on vm02 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/2144916961' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/166998620' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 
192.168.123.109:0/1744607518' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/2488861768' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14170 192.168.123.102:0/2278044456' entity='mgr.vm02.trdlkm' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mgrmap e14: vm02.trdlkm(active, since 37s) 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/2428174933' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/4116683933' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/3764951101' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/106849145' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/893775357' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 
192.168.123.109:0/2486482902' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Active manager daemon vm02.trdlkm restarted 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Activating manager daemon vm02.trdlkm 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: osdmap e5: 0 total, 0 up, 0 in 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mgrmap e15: vm02.trdlkm(active, starting, since 0.0039973s) 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr metadata", "who": "vm02.trdlkm", "id": "vm02.trdlkm"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Manager daemon vm02.trdlkm is now available 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.trdlkm/mirror_snapshot_schedule"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.trdlkm/trash_purge_schedule"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mgrmap e16: vm02.trdlkm(active, since 1.0115s) 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: [05/Mar/2026:22:46:50] ENGINE Bus STARTING 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: [05/Mar/2026:22:46:50] ENGINE Serving on https://192.168.123.102:7150 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: [05/Mar/2026:22:46:50] ENGINE Client ('192.168.123.102', 34450) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/2334336043' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: [05/Mar/2026:22:46:50] ENGINE Serving on http://192.168.123.102:8765 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: [05/Mar/2026:22:46:50] ENGINE Bus STARTED 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mgrmap e17: vm02.trdlkm(active, since 3s) 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/3864879144' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 
192.168.123.109:0/2161265915' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.855 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Updating vm02:/etc/ceph/ceph.conf 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Updating vm09:/etc/ceph/ceph.conf 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Updating vm02:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Updating vm09:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Updating vm02:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.client.admin.keyring 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Updating vm09:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.client.admin.keyring 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 
192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Deploying daemon ceph-exporter.vm09 on vm09 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/2938634719' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Deploying daemon crash.vm09 on vm09 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 
192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Deploying daemon node-exporter.vm09 on vm09 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/3744668713' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/2314350066' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm09.fivqds", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm09.fivqds", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Deploying daemon mgr.vm09.fivqds on vm09 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 
ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: Deploying daemon mon.vm09 on vm09 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/1708288707' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).mds e0 Unable to load 'last_metadata' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).mds e0 Unable to load 'last_metadata' 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).mds e1 new map 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).mds e1 print_map 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout: e1 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout: btime 2026-03-05T22:45:07:639454+0000 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout: legacy client fscid: -1 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout: 2026-03-05T23:47:04.856 INFO:journalctl@ceph.mon.vm09.vm09.stdout: No filesystems configured 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e3 e3: 0 total, 
0 up, 0 in 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e5 e5: 0 total, 0 up, 0 in 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e5 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).paxosservice(auth 1..8) refresh upgraded, format 0 -> 3 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).mgr e0 loading version 17 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).mgr e17 active server: [v2:192.168.123.102:6800/1056360856,v1:192.168.123.102:6801/1056360856](14227) 2026-03-05T23:47:04.857 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:04 vm09 ceph-mon[70730]: mon.vm09@-1(synchronizing).mgr e17 mkfs or daemon transitioned to available, loading commands 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: mon.vm02 calling monitor election 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: mon.vm09 calling monitor election 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": 
"mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: mon.vm02 is new leader, mons vm02,vm09 in quorum (ranks 0,1) 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: monmap epoch 2 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: fsid e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:47:10.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: last_changed 2026-03-05T22:47:04.646882+0000 2026-03-05T23:47:10.431 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: created 2026-03-05T22:45:06.277706+0000 2026-03-05T23:47:10.431 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: min_mon_release 19 (squid) 2026-03-05T23:47:10.431 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: election_strategy: 1 2026-03-05T23:47:10.431 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.vm02 2026-03-05T23:47:10.431 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.vm09 2026-03-05T23:47:10.431 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: fsmap 2026-03-05T23:47:10.431 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: osdmap e5: 0 total, 0 up, 0 in 2026-03-05T23:47:10.431 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: mgrmap e17: vm02.trdlkm(active, since 20s) 2026-03-05T23:47:10.431 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: overall HEALTH_OK 2026-03-05T23:47:10.431 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:09 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:10.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch 2026-03-05T23:47:10.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: mon.vm02 calling monitor election 2026-03-05T23:47:10.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:47:10.440 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: mon.vm09 calling monitor election 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: mon.vm02 is new leader, mons vm02,vm09 in quorum (ranks 0,1) 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: monmap epoch 2 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: fsid e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: last_changed 2026-03-05T22:47:04.646882+0000 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: created 2026-03-05T22:45:06.277706+0000 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: min_mon_release 19 (squid) 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: election_strategy: 1 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.vm02 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.vm09 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: fsmap 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: osdmap e5: 0 total, 0 up, 0 in 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: mgrmap e17: vm02.trdlkm(active, since 20s) 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: overall HEALTH_OK 2026-03-05T23:47:10.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:09 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:11.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:11 vm09 ceph-mon[70730]: from='mgr.14227 
192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:11.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:11 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:11.264 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-05T23:47:11.264 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":2,"fsid":"e1ad3122-18e4-11f1-9926-f7644c158a97","modified":"2026-03-05T22:47:04.646882Z","created":"2026-03-05T22:45:06.277706Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm09","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:3300","nonce":0},{"type":"v1","addr":"192.168.123.109:6789","nonce":0}]},"addr":"192.168.123.109:6789/0","public_addr":"192.168.123.109:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-05T23:47:11.264 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 2 2026-03-05T23:47:11.358 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-05T23:47:11.358 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph config generate-minimal-conf 2026-03-05T23:47:11.383 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:11 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mon metadata", "id": "vm09"}]: dispatch 2026-03-05T23:47:11.383 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:11 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:11.726 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:12.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:12.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:47:12.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: Updating vm02:/etc/ceph/ceph.conf 2026-03-05T23:47:12.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: Updating vm09:/etc/ceph/ceph.conf 2026-03-05T23:47:12.100 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: Updating 
vm02:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: Updating vm09:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/1185240389' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: Reconfiguring mon.vm02 (unknown last config time)... 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: Reconfiguring daemon mon.vm02 on vm02 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm02.trdlkm", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-05T23:47:12.101 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:12 vm09 ceph-mon[70730]: from='mgr.14227 
192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:12.130 INFO:teuthology.orchestra.run.vm02.stdout:# minimal ceph.conf for e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:47:12.130 INFO:teuthology.orchestra.run.vm02.stdout:[global] 2026-03-05T23:47:12.130 INFO:teuthology.orchestra.run.vm02.stdout: fsid = e1ad3122-18e4-11f1-9926-f7644c158a97 2026-03-05T23:47:12.130 INFO:teuthology.orchestra.run.vm02.stdout: mon_host = [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] 2026-03-05T23:47:12.166 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.166 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:12.166 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: Updating vm02:/etc/ceph/ceph.conf 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: Updating vm09:/etc/ceph/ceph.conf 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: Updating vm02:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: Updating vm09:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/config/ceph.conf 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/1185240389' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: Reconfiguring mon.vm02 (unknown last config time)... 
2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: Reconfiguring daemon mon.vm02 on vm02 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm02.trdlkm", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-05T23:47:12.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:12 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:12.210 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-03-05T23:47:12.210 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-05T23:47:12.210 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/ceph/ceph.conf 2026-03-05T23:47:12.278 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-05T23:47:12.278 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-05T23:47:12.346 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-05T23:47:12.346 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.conf 2026-03-05T23:47:12.376 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-05T23:47:12.376 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-05T23:47:12.448 INFO:tasks.cephadm:Deploying OSDs... 
2026-03-05T23:47:12.448 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-05T23:47:12.448 DEBUG:teuthology.orchestra.run.vm02:> dd if=/scratch_devs of=/dev/stdout 2026-03-05T23:47:12.474 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-03-05T23:47:12.474 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/nvme0n1 2026-03-05T23:47:12.535 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/nvme0n1 2026-03-05T23:47:12.535 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:47:12.535 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 728 Links: 1 Device type: 103,1 2026-03-05T23:47:12.535 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:47:12.535 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:47:12.535 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-05 23:46:10.045523357 +0100 2026-03-05T23:47:12.535 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-05 23:44:29.132765546 +0100 2026-03-05T23:47:12.536 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-05 23:44:29.132765546 +0100 2026-03-05T23:47:12.536 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-05 23:44:28.330921955 +0100 2026-03-05T23:47:12.536 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1 2026-03-05T23:47:12.600 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-05T23:47:12.600 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-05T23:47:12.600 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000178864 s, 2.9 MB/s 2026-03-05T23:47:12.601 DEBUG:teuthology.orchestra.run.vm02:> ! mount | grep -v devtmpfs | grep -q /dev/nvme0n1 2026-03-05T23:47:12.660 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/nvme1n1 2026-03-05T23:47:12.718 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/nvme1n1 2026-03-05T23:47:12.718 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:47:12.718 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 740 Links: 1 Device type: 103,3 2026-03-05T23:47:12.718 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:47:12.718 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:47:12.718 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-05 23:46:10.092523375 +0100 2026-03-05T23:47:12.718 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-05 23:44:29.585474848 +0100 2026-03-05T23:47:12.718 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-05 23:44:29.585474848 +0100 2026-03-05T23:47:12.718 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-05 23:44:28.448921999 +0100 2026-03-05T23:47:12.718 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1 2026-03-05T23:47:12.783 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-05T23:47:12.784 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-05T23:47:12.784 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000191367 s, 2.7 MB/s 2026-03-05T23:47:12.785 DEBUG:teuthology.orchestra.run.vm02:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme1n1 2026-03-05T23:47:12.870 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/nvme2n1 2026-03-05T23:47:12.934 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/nvme2n1 2026-03-05T23:47:12.934 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:47:12.934 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 752 Links: 1 Device type: 103,5 2026-03-05T23:47:12.934 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:47:12.934 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:47:12.934 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-05 23:46:10.138523392 +0100 2026-03-05T23:47:12.934 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-05 23:44:30.021492105 +0100 2026-03-05T23:47:12.934 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-05 23:44:30.021492105 +0100 2026-03-05T23:47:12.934 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-05 23:44:28.582922049 +0100 2026-03-05T23:47:12.934 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1 2026-03-05T23:47:13.008 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-05T23:47:13.008 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-05T23:47:13.008 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000145022 s, 3.5 MB/s 2026-03-05T23:47:13.009 DEBUG:teuthology.orchestra.run.vm02:> ! mount | grep -v devtmpfs | grep -q /dev/nvme2n1 2026-03-05T23:47:13.069 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/nvme3n1 2026-03-05T23:47:13.131 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/nvme3n1 2026-03-05T23:47:13.131 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:47:13.131 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 765 Links: 1 Device type: 103,7 2026-03-05T23:47:13.131 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:47:13.131 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:47:13.131 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-05 23:46:10.178523407 +0100 2026-03-05T23:47:13.131 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-05 23:44:30.452491947 +0100 2026-03-05T23:47:13.131 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-05 23:44:30.452491947 +0100 2026-03-05T23:47:13.131 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-05 23:44:28.709101753 +0100 2026-03-05T23:47:13.131 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: Reconfiguring mgr.vm02.trdlkm (unknown last config time)... 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: Reconfiguring daemon mgr.vm02.trdlkm on vm02 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/2149788444' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: Reconfiguring ceph-exporter.vm02 (monmap changed)... 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: Reconfiguring daemon ceph-exporter.vm02 on vm02 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-05T23:47:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:13 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: Reconfiguring mgr.vm02.trdlkm (unknown last config time)... 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: Reconfiguring daemon mgr.vm02.trdlkm on vm02 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2149788444' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: Reconfiguring ceph-exporter.vm02 (monmap changed)... 
2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: Reconfiguring daemon ceph-exporter.vm02 on vm02 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-05T23:47:13.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:13 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:13.198 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-05T23:47:13.199 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-05T23:47:13.199 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000217126 s, 2.4 MB/s 2026-03-05T23:47:13.200 DEBUG:teuthology.orchestra.run.vm02:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme3n1 2026-03-05T23:47:13.261 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-05T23:47:13.261 DEBUG:teuthology.orchestra.run.vm09:> dd if=/scratch_devs of=/dev/stdout 2026-03-05T23:47:13.279 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-03-05T23:47:13.279 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme0n1 2026-03-05T23:47:13.343 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme0n1 2026-03-05T23:47:13.343 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:47:13.343 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 728 Links: 1 Device type: 103,1 2026-03-05T23:47:13.343 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:47:13.343 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:47:13.343 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-05 23:46:54.903982257 +0100 2026-03-05T23:47:13.343 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-05 23:44:32.708044050 +0100 2026-03-05T23:47:13.343 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-05 23:44:32.708044050 +0100 2026-03-05T23:47:13.343 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-05 23:44:31.781043722 +0100 2026-03-05T23:47:13.343 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1 2026-03-05T23:47:13.411 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-05T23:47:13.411 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-05T23:47:13.411 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000179186 s, 2.9 MB/s 2026-03-05T23:47:13.412 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/nvme0n1 2026-03-05T23:47:13.479 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme1n1 2026-03-05T23:47:13.541 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme1n1 2026-03-05T23:47:13.542 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:47:13.542 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 741 Links: 1 Device type: 103,3 2026-03-05T23:47:13.542 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:47:13.542 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:47:13.542 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-05 23:46:54.934982465 +0100 2026-03-05T23:47:13.542 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-05 23:44:33.175044215 +0100 2026-03-05T23:47:13.542 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-05 23:44:33.175044215 +0100 2026-03-05T23:47:13.542 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-05 23:44:31.943043779 +0100 2026-03-05T23:47:13.542 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1 2026-03-05T23:47:13.607 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-05T23:47:13.607 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-05T23:47:13.607 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000168134 s, 3.0 MB/s 2026-03-05T23:47:13.608 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme1n1 2026-03-05T23:47:13.668 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme2n1 2026-03-05T23:47:13.726 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme2n1 2026-03-05T23:47:13.726 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:47:13.726 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 752 Links: 1 Device type: 103,5 2026-03-05T23:47:13.726 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:47:13.726 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:47:13.726 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-05 23:46:54.972982719 +0100 2026-03-05T23:47:13.726 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-05 23:44:33.647044382 +0100 2026-03-05T23:47:13.726 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-05 23:44:33.647044382 +0100 2026-03-05T23:47:13.726 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-05 23:44:32.041043814 +0100 2026-03-05T23:47:13.726 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1 2026-03-05T23:47:13.792 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-05T23:47:13.792 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-05T23:47:13.792 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000172302 s, 3.0 MB/s 2026-03-05T23:47:13.793 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/nvme2n1 2026-03-05T23:47:13.859 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme3n1 2026-03-05T23:47:13.923 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme3n1 2026-03-05T23:47:13.923 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-05T23:47:13.923 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 764 Links: 1 Device type: 103,7 2026-03-05T23:47:13.923 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-05T23:47:13.923 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-05T23:47:13.923 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-05 23:46:55.016983014 +0100 2026-03-05T23:47:13.923 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-05 23:44:34.113044547 +0100 2026-03-05T23:47:13.923 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-05 23:44:34.113044547 +0100 2026-03-05T23:47:13.923 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-05 23:44:32.182043863 +0100 2026-03-05T23:47:13.923 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1 2026-03-05T23:47:13.987 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-05T23:47:13.987 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-05T23:47:13.987 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000221535 s, 2.3 MB/s 2026-03-05T23:47:13.988 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme3n1 2026-03-05T23:47:14.049 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph orch apply osd --all-available-devices 2026-03-05T23:47:14.438 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: Reconfiguring crash.vm02 (monmap changed)... 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: Reconfiguring daemon crash.vm02 on vm02 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: Reconfiguring alertmanager.vm02 (dependencies changed)... 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: Reconfiguring daemon alertmanager.vm02 on vm02 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: Standby manager daemon vm09.fivqds started 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: from='mgr.? 192.168.123.109:0/1185830762' entity='mgr.vm09.fivqds' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm09.fivqds/crt"}]: dispatch 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: from='mgr.? 192.168.123.109:0/1185830762' entity='mgr.vm09.fivqds' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: from='mgr.? 192.168.123.109:0/1185830762' entity='mgr.vm09.fivqds' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm09.fivqds/key"}]: dispatch 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: from='mgr.? 192.168.123.109:0/1185830762' entity='mgr.vm09.fivqds' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:14.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:14 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: Reconfiguring crash.vm02 (monmap changed)... 
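Before the OSD spec is applied, every scratch device listed in /scratch_devs is probed on each host: stat the block node, read one 512-byte sector with dd, and confirm the device does not appear in the mount table outside devtmpfs. A minimal sketch of that probe, assuming the device list seen in this run; the loop is illustrative, not the teuthology code:

# Sketch only: per-device readiness probe mirroring the stat / dd / mount checks above.
import subprocess

DEVICES = ("/dev/nvme0n1", "/dev/nvme1n1", "/dev/nvme2n1", "/dev/nvme3n1")

def device_ready(dev: str) -> bool:
    # The block node exists and can be stat'ed.
    if subprocess.run(["stat", dev], capture_output=True).returncode != 0:
        return False
    # One 512-byte read succeeds, i.e. the device is actually readable.
    if subprocess.run(["sudo", "dd", f"if={dev}", "of=/dev/null", "count=1"],
                      capture_output=True).returncode != 0:
        return False
    # The device is not mounted anywhere (devtmpfs lines are ignored).
    mounts = subprocess.run(["mount"], capture_output=True, text=True).stdout
    return not any(dev in line for line in mounts.splitlines()
                   if "devtmpfs" not in line)

for dev in DEVICES:
    print(dev, "ready" if device_ready(dev) else "NOT ready")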
2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: Reconfiguring daemon crash.vm02 on vm02 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: Reconfiguring alertmanager.vm02 (dependencies changed)... 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: Reconfiguring daemon alertmanager.vm02 on vm02 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: Standby manager daemon vm09.fivqds started 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: from='mgr.? 192.168.123.109:0/1185830762' entity='mgr.vm09.fivqds' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm09.fivqds/crt"}]: dispatch 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: from='mgr.? 192.168.123.109:0/1185830762' entity='mgr.vm09.fivqds' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: from='mgr.? 192.168.123.109:0/1185830762' entity='mgr.vm09.fivqds' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm09.fivqds/key"}]: dispatch 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: from='mgr.? 192.168.123.109:0/1185830762' entity='mgr.vm09.fivqds' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:14.804 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:14 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:14.804 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled osd.all-available-devices update... 2026-03-05T23:47:14.867 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 2026-03-05T23:47:14.867 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:15.279 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:15.619 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:15 vm02 ceph-mon[65842]: Reconfiguring grafana.vm02 (dependencies changed)... 
2026-03-05T23:47:15.619 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:15 vm02 ceph-mon[65842]: Reconfiguring daemon grafana.vm02 on vm02 2026-03-05T23:47:15.619 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:15 vm02 ceph-mon[65842]: mgrmap e18: vm02.trdlkm(active, since 25s), standbys: vm09.fivqds 2026-03-05T23:47:15.619 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:15 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr metadata", "who": "vm09.fivqds", "id": "vm09.fivqds"}]: dispatch 2026-03-05T23:47:15.619 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:15 vm02 ceph-mon[65842]: from='client.14274 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-05T23:47:15.619 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:15 vm02 ceph-mon[65842]: Marking host: vm02 for OSDSpec preview refresh. 2026-03-05T23:47:15.619 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:15 vm02 ceph-mon[65842]: Marking host: vm09 for OSDSpec preview refresh. 2026-03-05T23:47:15.619 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:15 vm02 ceph-mon[65842]: Saving service osd.all-available-devices spec with placement * 2026-03-05T23:47:15.619 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:15 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:15.619 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:15 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:15.788 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:15.919 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-05T23:47:15.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:15 vm09 ceph-mon[70730]: Reconfiguring grafana.vm02 (dependencies changed)... 2026-03-05T23:47:15.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:15 vm09 ceph-mon[70730]: Reconfiguring daemon grafana.vm02 on vm02 2026-03-05T23:47:15.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:15 vm09 ceph-mon[70730]: mgrmap e18: vm02.trdlkm(active, since 25s), standbys: vm09.fivqds 2026-03-05T23:47:15.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:15 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr metadata", "who": "vm09.fivqds", "id": "vm09.fivqds"}]: dispatch 2026-03-05T23:47:15.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:15 vm09 ceph-mon[70730]: from='client.14274 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-05T23:47:15.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:15 vm09 ceph-mon[70730]: Marking host: vm02 for OSDSpec preview refresh. 2026-03-05T23:47:15.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:15 vm09 ceph-mon[70730]: Marking host: vm09 for OSDSpec preview refresh. 
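From this point the task repeatedly runs "ceph osd stat -f json" through a cephadm shell and waits for the expected 8 OSDs to be reported. A minimal sketch of such a poll, assuming the cephadm path, image and fsid visible in this run; the loop structure and timeout are illustrative rather than the teuthology implementation:

# Sketch only: poll `ceph osd stat -f json` via cephadm shell until enough OSDs are up.
import json
import subprocess
import time

FSID = "e1ad3122-18e4-11f1-9926-f7644c158a97"
IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3"
CEPHADM = "/home/ubuntu/cephtest/cephadm"
EXPECTED_OSDS = 8

def osd_stat() -> dict:
    # Run `ceph osd stat -f json` inside a cephadm shell, exactly as the log does.
    out = subprocess.run(
        ["sudo", CEPHADM, "--image", IMAGE, "shell",
         "-c", "/etc/ceph/ceph.conf", "-k", "/etc/ceph/ceph.client.admin.keyring",
         "--fsid", FSID, "--", "ceph", "osd", "stat", "-f", "json"],
        capture_output=True, text=True, check=True).stdout
    return json.loads(out)  # json.loads tolerates the leading blank line in the shell output

deadline = time.time() + 900  # arbitrary 15-minute budget for the orchestrator
while True:
    stat = osd_stat()
    if stat["num_osds"] >= EXPECTED_OSDS and stat["num_up_osds"] >= EXPECTED_OSDS:
        break
    if time.time() > deadline:
        raise RuntimeError(f"only {stat['num_up_osds']}/{EXPECTED_OSDS} OSDs up")
    time.sleep(2)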
2026-03-05T23:47:15.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:15 vm09 ceph-mon[70730]: Saving service osd.all-available-devices spec with placement * 2026-03-05T23:47:15.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:15 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:15.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:15 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:16.648 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:16 vm02 ceph-mon[65842]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:16.648 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:16 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:16.648 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:16 vm02 ceph-mon[65842]: Reconfiguring prometheus.vm02 (dependencies changed)... 2026-03-05T23:47:16.648 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:16 vm02 ceph-mon[65842]: Reconfiguring daemon prometheus.vm02 on vm02 2026-03-05T23:47:16.648 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:16 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1695965089' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:16.920 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:16.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:16 vm09 ceph-mon[70730]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:16.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:16 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:16.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:16 vm09 ceph-mon[70730]: Reconfiguring prometheus.vm02 (dependencies changed)... 2026-03-05T23:47:16.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:16 vm09 ceph-mon[70730]: Reconfiguring daemon prometheus.vm02 on vm02 2026-03-05T23:47:16.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:16 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1695965089' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:17.286 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:17.642 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.642 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.642 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: Reconfiguring ceph-exporter.vm09 (monmap changed)... 
2026-03-05T23:47:17.642 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: Reconfiguring daemon ceph-exporter.vm09 on vm09 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm09.fivqds", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-05T23:47:17.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:17 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:17.654 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: Reconfiguring ceph-exporter.vm09 (monmap changed)... 
2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: Reconfiguring daemon ceph-exporter.vm09 on vm09 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm09.fivqds", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-05T23:47:17.681 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:17 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:17.710 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: Reconfiguring crash.vm09 (monmap changed)... 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: Reconfiguring daemon crash.vm09 on vm09 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: Reconfiguring mgr.vm09.fivqds (monmap changed)... 
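The dispatched "auth get-or-create" commands above show cephadm minting per-daemon keys with profile caps before reconfiguring the daemons on vm09. A minimal sketch of issuing two of those keys by hand, assuming a reachable cluster and client.admin credentials; the entities and caps are the ones dispatched in the log:

# Sketch only: per-daemon keys requested above, issued directly with `ceph auth get-or-create`.
import subprocess

# "allow *" is passed as a single argv element, so no shell globbing is involved.
keys = {
    "client.crash.vm09": ["mon", "profile crash", "mgr", "profile crash"],
    "mgr.vm09.fivqds":   ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"],
}

for entity, caps in keys.items():
    subprocess.run(["ceph", "auth", "get-or-create", entity, *caps], check=True)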
2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: Reconfiguring daemon mgr.vm09.fivqds on vm09 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3530545358' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm02.local:3000"}]: dispatch 2026-03-05T23:47:18.644 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' 
entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm02.local:9095"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:18 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: Reconfiguring crash.vm09 (monmap changed)... 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: Reconfiguring daemon crash.vm09 on vm09 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: Reconfiguring mgr.vm09.fivqds (monmap changed)... 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: Reconfiguring daemon mgr.vm09.fivqds on vm09 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3530545358' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: 
from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm02.local:3000"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-05T23:47:18.645 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm02.local:9095"}]: dispatch 2026-03-05T23:47:18.646 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:18.646 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:18 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:47:18.711 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:19.125 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:19.543 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:19.623 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-05T23:47:19.796 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: Reconfiguring mon.vm09 (monmap changed)... 2026-03-05T23:47:19.796 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: Reconfiguring daemon mon.vm09 on vm09 2026-03-05T23:47:19.796 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-05T23:47:19.796 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch 2026-03-05T23:47:19.797 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-05T23:47:19.797 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm02.local:3000"}]: dispatch 2026-03-05T23:47:19.797 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-05T23:47:19.797 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm02.local:9095"}]: dispatch 2026-03-05T23:47:19.797 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:19.797 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:19.797 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:19.797 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:19.797 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:47:19.797 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:19 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3339568473' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: Reconfiguring mon.vm09 (monmap changed)... 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: Reconfiguring daemon mon.vm09 on vm09 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm02.local:3000"}]: dispatch 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm02.local:9095"}]: dispatch 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:47:19.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:19 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3339568473' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:20.624 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:20.650 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:20.650 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 
192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:47:20.652 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:20.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:20.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:20.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:20.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:20.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:47:20.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:20.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-05T23:47:20.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:47:20.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:20.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:47:20.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:47:21.003 
INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:21.373 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:21.443 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-05T23:47:21.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:21 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3201688980' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:21.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:21 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3201688980' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:22.443 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/355698912' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "80a98a67-33ad-48d5-92bf-362d67d3c142"}]: dispatch 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "80a98a67-33ad-48d5-92bf-362d67d3c142"}]: dispatch 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "80a98a67-33ad-48d5-92bf-362d67d3c142"}]': finished 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: osdmap e6: 1 total, 0 up, 1 in 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1346441303' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5448e1e1-7c37-4f21-9a5b-9ef4d264f02a"}]: dispatch 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/1346441303' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5448e1e1-7c37-4f21-9a5b-9ef4d264f02a"}]': finished 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: osdmap e7: 2 total, 0 up, 2 in 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:22.660 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:22 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/355698912' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "80a98a67-33ad-48d5-92bf-362d67d3c142"}]: dispatch 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "80a98a67-33ad-48d5-92bf-362d67d3c142"}]: dispatch 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "80a98a67-33ad-48d5-92bf-362d67d3c142"}]': finished 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: osdmap e6: 1 total, 0 up, 1 in 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1346441303' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5448e1e1-7c37-4f21-9a5b-9ef4d264f02a"}]: dispatch 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1346441303' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5448e1e1-7c37-4f21-9a5b-9ef4d264f02a"}]': finished 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: osdmap e7: 2 total, 0 up, 2 in 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:22.833 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:22 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:22.879 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:23.254 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:23.331 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1772750842,"num_remapped_pgs":0} 2026-03-05T23:47:23.667 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:23 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/753749803' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:23.667 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:23 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1246190062' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:23.667 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:23 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3806117575' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:23.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:23 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/753749803' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:23.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:23 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1246190062' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:23.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:23 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3806117575' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:24.331 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:24.668 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:24 vm02 ceph-mon[65842]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:24.684 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:24.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:24 vm09 ceph-mon[70730]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:25.034 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:25.107 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1772750842,"num_remapped_pgs":0} 2026-03-05T23:47:25.675 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:25 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2625799654' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:25.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:25 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2625799654' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:26.108 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:26.490 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:26.876 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1297960060' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "08990667-2e5b-49f7-a33c-702dd41e5d03"}]: dispatch 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1297960060' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "08990667-2e5b-49f7-a33c-702dd41e5d03"}]': finished 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: osdmap e8: 3 total, 0 up, 3 in 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/1459163828' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b5510aec-ded8-47ec-b142-6350ab8d840a"}]: dispatch 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b5510aec-ded8-47ec-b142-6350ab8d840a"}]: dispatch 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b5510aec-ded8-47ec-b142-6350ab8d840a"}]': finished 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: osdmap e9: 4 total, 0 up, 4 in 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:26.878 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:26 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3768217708' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:26.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:26.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1297960060' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "08990667-2e5b-49f7-a33c-702dd41e5d03"}]: dispatch 2026-03-05T23:47:26.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/1297960060' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "08990667-2e5b-49f7-a33c-702dd41e5d03"}]': finished 2026-03-05T23:47:26.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: osdmap e8: 3 total, 0 up, 3 in 2026-03-05T23:47:26.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:26.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:26.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:26.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/1459163828' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b5510aec-ded8-47ec-b142-6350ab8d840a"}]: dispatch 2026-03-05T23:47:26.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b5510aec-ded8-47ec-b142-6350ab8d840a"}]: dispatch 2026-03-05T23:47:26.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b5510aec-ded8-47ec-b142-6350ab8d840a"}]': finished 2026-03-05T23:47:26.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: osdmap e9: 4 total, 0 up, 4 in 2026-03-05T23:47:26.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:26.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:26.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:26.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:26.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:26 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3768217708' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:26.963 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1772750846,"num_remapped_pgs":0} 2026-03-05T23:47:27.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:27 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/2588398680' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:27.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:27 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/698144129' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:27.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:27 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/2588398680' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:27.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:27 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/698144129' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:27.964 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:28.325 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:28.676 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:28.759 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1772750846,"num_remapped_pgs":0} 2026-03-05T23:47:28.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:28 vm02 ceph-mon[65842]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:28.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:28 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3227839294' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:29.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:28 vm09 ceph-mon[70730]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:29.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:28 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3227839294' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:29.761 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:30.186 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:30.540 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:30.599 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1772750849,"num_remapped_pgs":0} 2026-03-05T23:47:30.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/383790440' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d"}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='client.? 
192.168.123.109:0/383790440' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d"}]': finished 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: osdmap e10: 5 total, 0 up, 5 in 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2351424899' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5650a104-bcdb-4a1b-94c6-0cff6e580fb9"}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2351424899' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5650a104-bcdb-4a1b-94c6-0cff6e580fb9"}]': finished 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: osdmap e11: 6 total, 0 up, 6 in 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/3319205584' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1298975772' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:30 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/79871763' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:31.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:31.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/383790440' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d"}]: dispatch 2026-03-05T23:47:31.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='client.? 
192.168.123.109:0/383790440' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d"}]': finished 2026-03-05T23:47:31.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: osdmap e10: 5 total, 0 up, 5 in 2026-03-05T23:47:31.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:31.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:31.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:31.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2351424899' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5650a104-bcdb-4a1b-94c6-0cff6e580fb9"}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/2351424899' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5650a104-bcdb-4a1b-94c6-0cff6e580fb9"}]': finished 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: osdmap e11: 6 total, 0 up, 6 in 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/3319205584' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1298975772' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:31.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:30 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/79871763' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:31.600 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:31.993 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:32.354 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:32.426 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1772750849,"num_remapped_pgs":0} 2026-03-05T23:47:33.116 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:32 vm02 ceph-mon[65842]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:33.116 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:32 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2051682889' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:33.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:32 vm09 ceph-mon[70730]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:33.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:32 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2051682889' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:33.427 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3193208800' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f348997e-4c93-43ae-8e46-fb679459a5fe"}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3193208800' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f348997e-4c93-43ae-8e46-fb679459a5fe"}]': finished 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: osdmap e12: 7 total, 0 up, 7 in 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/3935102470' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "71a13a86-ff4a-4521-97c9-abda62c60a41"}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "71a13a86-ff4a-4521-97c9-abda62c60a41"}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "71a13a86-ff4a-4521-97c9-abda62c60a41"}]': finished 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: osdmap e13: 8 total, 0 up, 8 in 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:33.729 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:33.819 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3193208800' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f348997e-4c93-43ae-8e46-fb679459a5fe"}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3193208800' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f348997e-4c93-43ae-8e46-fb679459a5fe"}]': finished 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: osdmap e12: 7 total, 0 up, 7 in 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", 
"id": 6}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/3935102470' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "71a13a86-ff4a-4521-97c9-abda62c60a41"}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "71a13a86-ff4a-4521-97c9-abda62c60a41"}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "71a13a86-ff4a-4521-97c9-abda62c60a41"}]': finished 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: osdmap e13: 8 total, 0 up, 8 in 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:33 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' 
entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:34.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:33 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-05T23:47:34.181 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:34.240 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750853,"num_remapped_pgs":0} 2026-03-05T23:47:35.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:34 vm09 ceph-mon[70730]: pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:35.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:34 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/251693485' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:35.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:34 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/2872785382' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:35.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:34 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1446875457' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:35.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:34 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:47:35.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:34 vm02 ceph-mon[65842]: pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:35.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:34 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/251693485' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:35.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:34 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/2872785382' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-05T23:47:35.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:34 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1446875457' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:35.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:34 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:47:35.241 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:35.630 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:35.986 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:36.084 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750853,"num_remapped_pgs":0} 2026-03-05T23:47:36.906 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:36 vm02 ceph-mon[65842]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:36.906 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:36 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2920720509' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:36.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:36 vm09 ceph-mon[70730]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:36.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:36 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2920720509' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:37.085 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:37.503 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:37.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='client.? 192.168.123.109:0/1291892808' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4cf1abbb-f7a9-436e-881d-560a42db7e19"}]: dispatch 2026-03-05T23:47:37.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4cf1abbb-f7a9-436e-881d-560a42db7e19"}]: dispatch 2026-03-05T23:47:37.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4cf1abbb-f7a9-436e-881d-560a42db7e19"}]': finished 2026-03-05T23:47:37.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: osdmap e14: 9 total, 0 up, 9 in 2026-03-05T23:47:37.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:37.743 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:37.936 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/1291892808' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4cf1abbb-f7a9-436e-881d-560a42db7e19"}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4cf1abbb-f7a9-436e-881d-560a42db7e19"}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4cf1abbb-f7a9-436e-881d-560a42db7e19"}]': finished 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: osdmap e14: 9 total, 0 up, 9 in 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 
ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 8}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2528463432' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3fba7082-df8f-4423-9970-89fdc6a8aade"}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2528463432' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3fba7082-df8f-4423-9970-89fdc6a8aade"}]': finished 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: osdmap e15: 10 total, 0 up, 10 in 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='client.? 192.168.123.109:0/1075221999' entity='client.bootstrap-osd' cmd=[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 8}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 9}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]': finished 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: osdmap e16: 9 total, 0 up, 9 in 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:37.984 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 9}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3826305614' entity='client.bootstrap-osd' cmd=[{"prefix": "osd purge-new", "id": 9, "yes_i_really_mean_it": true}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/3826305614' entity='client.bootstrap-osd' cmd='[{"prefix": "osd purge-new", "id": 9, "yes_i_really_mean_it": true}]': finished 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: osdmap e17: 8 total, 0 up, 8 in 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:37.985 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:37 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-05T23:47:38.052 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 
192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 8}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2528463432' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3fba7082-df8f-4423-9970-89fdc6a8aade"}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2528463432' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3fba7082-df8f-4423-9970-89fdc6a8aade"}]': finished 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: osdmap e15: 10 total, 0 up, 10 in 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='client.? 
192.168.123.109:0/1075221999' entity='client.bootstrap-osd' cmd=[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 8}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 9}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]: dispatch 2026-03-05T23:47:38.119 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd purge-new", "id": 8, "yes_i_really_mean_it": true}]': finished 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: osdmap e16: 9 total, 0 up, 9 in 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 9}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3826305614' entity='client.bootstrap-osd' cmd=[{"prefix": "osd purge-new", "id": 9, "yes_i_really_mean_it": true}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3826305614' entity='client.bootstrap-osd' cmd='[{"prefix": "osd purge-new", "id": 9, "yes_i_really_mean_it": true}]': finished 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: osdmap e17: 8 total, 0 up, 8 in 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-05T23:47:38.120 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:37 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-05T23:47:38.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:38 vm02 ceph-mon[65842]: pgmap v29: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:38.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:38 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3598766812' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:38.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:38 vm02 ceph-mon[65842]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL) 2026-03-05T23:47:39.054 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:39.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:38 vm09 ceph-mon[70730]: pgmap v29: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:39.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:38 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3598766812' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:39.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:38 vm09 ceph-mon[70730]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL) 2026-03-05T23:47:39.414 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:39.761 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:39.820 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:40.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:39 vm09 ceph-mon[70730]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-05T23:47:40.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_id: all-available-devices 2026-03-05T23:47:40.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_name: osd.all-available-devices 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: placement: 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: host_pattern: '*' 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: spec: 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: data_devices: 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: all: true 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: filter_logic: AND 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: objectstore: bluestore 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpxs41r_4z:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpko9me24s:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> passed data devices: 8 physical, 0 LVM 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 
2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 80a98a67-33ad-48d5-92bf-362d67d3c142 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98 /dev/nvme0n1 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme0n1" successfully created. 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Creating devices file /etc/lvm/devices/system.devices 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98" successfully created 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142" created. 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.0 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/ 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap 
/var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 80a98a67-33ad-48d5-92bf-362d67d3c142 --setuser ceph --setgroup ceph 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:22.878+0000 7fb627879740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) No valid bdev label found 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:23.141+0000 7fb627879740 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme0n1 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 0 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme0n1 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new b5510aec-ded8-47ec-b142-6350ab8d840a 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d /dev/nvme1n1 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme1n1" successfully created. 
2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d" successfully created 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a" created. 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.3 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/ 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid b5510aec-ded8-47ec-b142-6350ab8d840a --setuser ceph --setgroup ceph 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:26.845+0000 7f95b881b740 -1 bluestore(/var/lib/ceph/osd/ceph-3//block) No valid bdev label found 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:27.106+0000 7f95b881b740 -1 bluestore(/var/lib/ceph/osd/ceph-3/) _read_fsid unparsable uuid 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme1n1 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr 
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 3 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme1n1 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf /dev/nvme2n1 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme2n1" successfully created. 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf" successfully created 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d" created. 
2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.4 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/ 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d --setuser ceph --setgroup ceph 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:30.270+0000 7f92679f8740 -1 bluestore(/var/lib/ceph/osd/ceph-4//block) No valid bdev label found 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:30.536+0000 7f92679f8740 -1 bluestore(/var/lib/ceph/osd/ceph-4/) _read_fsid unparsable uuid 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme2n1 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.182 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 4 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme2n1 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 71a13a86-ff4a-4521-97c9-abda62c60a41 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1 /dev/nvme3n1 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme3n1" successfully created. 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1" successfully created 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41" created. 
2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-7/activate.monmap 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.7 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/keyring 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/ 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 7 --monmap /var/lib/ceph/osd/ceph-7/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-7/ --osd-uuid 71a13a86-ff4a-4521-97c9-abda62c60a41 --setuser ceph --setgroup ceph 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:33.946+0000 7fa75bb2c740 -1 bluestore(/var/lib/ceph/osd/ceph-7//block) No valid bdev label found 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:34.211+0000 7fa75bb2c740 -1 bluestore(/var/lib/ceph/osd/ceph-7/) _read_fsid unparsable uuid 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme3n1 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.183 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 7 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme3n1 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4cf1abbb-f7a9-436e-881d-560a42db7e19 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-45cc0b30-ad10-40ad-a93b-6cbc33ae0bc0 /dev/vdb 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: Failed to read lvm info for /dev/vdb PVID RJZoaqboRH172Zw9Dg6qpIz8SdmQeLNC. 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Was unable to complete a new OSD, will rollback changes 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.8 --yes-i-really-mean-it 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: purged osd.8 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> No OSD identified by "8" was found among LVM-based OSDs. 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Proceeding to check RAW-based OSDs. 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr No OSD were found. 
2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpxs41r_4z:/etc/ceph/ceph.conf:z -v 
/tmp/ceph-tmpko9me24s:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: if self._apply_service(spec): 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in _apply_service 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return future.result(timeout) 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.__get_result() 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise self._exception 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return await gather(*futures) 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret_msg = await self.create_single_host( 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-05T23:47:40.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise RuntimeError( 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e 
CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpxs41r_4z:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpko9me24s:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> passed data devices: 8 physical, 0 LVM 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 80a98a67-33ad-48d5-92bf-362d67d3c142 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98 /dev/nvme0n1 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme0n1" successfully created. 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Creating devices file /etc/lvm/devices/system.devices 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98" successfully created 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142" created. 
2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.0 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/ 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 80a98a67-33ad-48d5-92bf-362d67d3c142 --setuser ceph --setgroup ceph 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:22.878+0000 7fb627879740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) No valid bdev label found 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:23.141+0000 7fb627879740 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme0n1 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.184 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 0 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme0n1 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new b5510aec-ded8-47ec-b142-6350ab8d840a 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d /dev/nvme1n1 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme1n1" successfully created. 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d" successfully created 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a" created. 
2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.3 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/ 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid b5510aec-ded8-47ec-b142-6350ab8d840a --setuser ceph --setgroup ceph 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:26.845+0000 7f95b881b740 -1 bluestore(/var/lib/ceph/osd/ceph-3//block) No valid bdev label found 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:27.106+0000 7f95b881b740 -1 bluestore(/var/lib/ceph/osd/ceph-3/) _read_fsid unparsable uuid 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme1n1 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.184 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 3 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme1n1 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.184 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf /dev/nvme2n1 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme2n1" successfully created. 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf" successfully created 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d" created. 
2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.4 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/ 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d --setuser ceph --setgroup ceph 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:30.270+0000 7f92679f8740 -1 bluestore(/var/lib/ceph/osd/ceph-4//block) No valid bdev label found 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:30.536+0000 7f92679f8740 -1 bluestore(/var/lib/ceph/osd/ceph-4/) _read_fsid unparsable uuid 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme2n1 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.185 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 4 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme2n1 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 71a13a86-ff4a-4521-97c9-abda62c60a41 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1 /dev/nvme3n1 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme3n1" successfully created. 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1" successfully created 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41" created. 
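The journal above repeats the same ceph-volume create sequence for every data device (vgcreate, lvcreate, tmpfs mount, chown, mkfs, prime-osd-dir, activate). A minimal sketch of driving that flow by hand for one device, assuming the device path from this run and the cephadm wrapper used by the test; this is not something the test itself executes:

    # Sketch only: create a single bluestore OSD on one device, mirroring the
    # per-device prepare + activate steps logged above.
    # /dev/nvme1n1 is taken from the log; adjust for the host being debugged.
    cephadm ceph-volume -- lvm create --bluestore --data /dev/nvme1n1

    # List what ceph-volume now knows about LVM-backed OSDs on this host.
    cephadm ceph-volume -- lvm list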
2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-7/activate.monmap 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.7 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/keyring 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/ 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 7 --monmap /var/lib/ceph/osd/ceph-7/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-7/ --osd-uuid 71a13a86-ff4a-4521-97c9-abda62c60a41 --setuser ceph --setgroup ceph 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:33.946+0000 7fa75bb2c740 -1 bluestore(/var/lib/ceph/osd/ceph-7//block) No valid bdev label found 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:34.211+0000 7fa75bb2c740 -1 bluestore(/var/lib/ceph/osd/ceph-7/) _read_fsid unparsable uuid 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme3n1 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.185 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 7 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme3n1 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4cf1abbb-f7a9-436e-881d-560a42db7e19 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-45cc0b30-ad10-40ad-a93b-6cbc33ae0bc0 /dev/vdb 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: Failed to read lvm info for /dev/vdb PVID RJZoaqboRH172Zw9Dg6qpIz8SdmQeLNC. 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Was unable to complete a new OSD, will rollback changes 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.8 --yes-i-really-mean-it 2026-03-05T23:47:40.185 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr stderr: purged osd.8 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> No OSD identified by "8" was found among LVM-based OSDs. 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr --> Proceeding to check RAW-based OSDs. 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr No OSD were found. 
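The batch run fails at /dev/vdb: vgcreate reports "Failed to read lvm info for /dev/vdb PVID ...", so ceph-volume rolls back and purges the just-allocated osd.8. A short diagnostic sketch for that situation, assuming (not established by this log) that the device carries stale LVM/PV metadata from an earlier run; the host and device names are taken from the log:

    # Read-only checks: what does the device and LVM currently report?
    lsblk -o NAME,SIZE,TYPE,FSTYPE /dev/vdb
    pvs /dev/vdb
    lvmdevices            # entries in the el9 LVM devices file

    # If the signature is confirmed stale, clear it through the orchestrator ...
    ceph orch device zap vm09 /dev/vdb --force

    # ... or wipe filesystem/LVM signatures directly on the host.
    wipefs --all /dev/vdb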
2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpxs41r_4z:/etc/ceph/ceph.conf:z -v 
/tmp/ceph-tmpko9me24s:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:47:40.186 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:39 vm09 ceph-mon[70730]: pgmap v30: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:39 vm02 ceph-mon[65842]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: service_id: all-available-devices 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: service_name: osd.all-available-devices 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: placement: 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: host_pattern: '*' 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: spec: 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: data_devices: 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: all: true 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: filter_logic: AND 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: objectstore: bluestore 2026-03-05T23:47:40.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpxs41r_4z:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpko9me24s:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> passed data devices: 8 physical, 0 LVM 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph 
--cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 80a98a67-33ad-48d5-92bf-362d67d3c142 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98 /dev/nvme0n1 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme0n1" successfully created. 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Creating devices file /etc/lvm/devices/system.devices 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98" successfully created 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142" created. 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.0 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/ 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 80a98a67-33ad-48d5-92bf-362d67d3c142 --setuser ceph --setgroup ceph 2026-03-05T23:47:40.192 
INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:22.878+0000 7fb627879740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) No valid bdev label found 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:23.141+0000 7fb627879740 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme0n1 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 0 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme0n1 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new b5510aec-ded8-47ec-b142-6350ab8d840a 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d /dev/nvme1n1 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme1n1" successfully created. 
2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d" successfully created 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a" created. 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.3 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/ 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid b5510aec-ded8-47ec-b142-6350ab8d840a --setuser ceph --setgroup ceph 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:26.845+0000 7f95b881b740 -1 bluestore(/var/lib/ceph/osd/ceph-3//block) No valid bdev label found 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:27.106+0000 7f95b881b740 -1 bluestore(/var/lib/ceph/osd/ceph-3/) _read_fsid unparsable uuid 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme1n1 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr 
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 3 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme1n1 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf /dev/nvme2n1 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme2n1" successfully created. 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf" successfully created 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d" created. 
2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.4 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/ 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d --setuser ceph --setgroup ceph 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:30.270+0000 7f92679f8740 -1 bluestore(/var/lib/ceph/osd/ceph-4//block) No valid bdev label found 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:30.536+0000 7f92679f8740 -1 bluestore(/var/lib/ceph/osd/ceph-4/) _read_fsid unparsable uuid 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme2n1 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.193 
INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 4 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme2n1 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 71a13a86-ff4a-4521-97c9-abda62c60a41 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1 /dev/nvme3n1 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme3n1" successfully created. 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1" successfully created 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41" created. 
2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-7/activate.monmap 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.7 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/keyring 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/ 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 7 --monmap /var/lib/ceph/osd/ceph-7/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-7/ --osd-uuid 71a13a86-ff4a-4521-97c9-abda62c60a41 --setuser ceph --setgroup ceph 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:33.946+0000 7fa75bb2c740 -1 bluestore(/var/lib/ceph/osd/ceph-7//block) No valid bdev label found 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:34.211+0000 7fa75bb2c740 -1 bluestore(/var/lib/ceph/osd/ceph-7/) _read_fsid unparsable uuid 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme3n1 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.193 
INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 7 2026-03-05T23:47:40.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme3n1 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4cf1abbb-f7a9-436e-881d-560a42db7e19 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-45cc0b30-ad10-40ad-a93b-6cbc33ae0bc0 /dev/vdb 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: Failed to read lvm info for /dev/vdb PVID RJZoaqboRH172Zw9Dg6qpIz8SdmQeLNC. 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Was unable to complete a new OSD, will rollback changes 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.8 --yes-i-really-mean-it 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: purged osd.8 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> No OSD identified by "8" was found among LVM-based OSDs. 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Proceeding to check RAW-based OSDs. 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr No OSD were found. 
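The mgr error earlier in this journal quotes the DriveGroupSpec it was trying to apply (osd.all-available-devices). Once the /dev/vdb problem is resolved, the same spec can be re-applied by hand; the sketch below reuses the spec text quoted in the journal, and the file path /tmp/osd-spec.yaml is illustrative only:

    # Re-apply the all-available-devices OSD spec quoted in the mgr error above.
    cat > /tmp/osd-spec.yaml <<'EOF'
    service_type: osd
    service_id: all-available-devices
    placement:
      host_pattern: '*'
    spec:
      data_devices:
        all: true
      filter_logic: AND
      objectstore: bluestore
    EOF
    ceph orch apply -i /tmp/osd-spec.yaml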
2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Traceback (most recent call last): 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: exec(code, run_globals) 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpxs41r_4z:/etc/ceph/ceph.conf:z -v 
/tmp/ceph-tmpko9me24s:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Traceback (most recent call last): 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: if self._apply_service(spec): 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in _apply_service 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return future.result(timeout) 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return self.__get_result() 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: raise self._exception 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return await gather(*futures) 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: ret_msg = await self.create_single_host( 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: raise RuntimeError( 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e 
CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpxs41r_4z:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpko9me24s:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> passed data devices: 8 physical, 0 LVM 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 80a98a67-33ad-48d5-92bf-362d67d3c142 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98 /dev/nvme0n1 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme0n1" successfully created. 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Creating devices file /etc/lvm/devices/system.devices 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98" successfully created 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98 2026-03-05T23:47:40.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142" created. 
2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.0 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/ 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 80a98a67-33ad-48d5-92bf-362d67d3c142 --setuser ceph --setgroup ceph 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:22.878+0000 7fb627879740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) No valid bdev label found 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:23.141+0000 7fb627879740 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme0n1 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-9ff0c6b1-659a-4f8e-ba0b-04ccf2578b98/osd-block-80a98a67-33ad-48d5-92bf-362d67d3c142 /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-05T23:47:40.195 
INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 0 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme0n1 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new b5510aec-ded8-47ec-b142-6350ab8d840a 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d /dev/nvme1n1 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme1n1" successfully created. 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d" successfully created 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a" created. 
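Once the logical volume exists, each OSD goes through the same prepare/activate sequence that osd.0 completed above: a tmpfs is mounted at the OSD data directory, the block symlink is pointed at the LV, the current monmap is fetched, `ceph-osd --mkfs` formats the BlueStore device, and `ceph-bluestore-tool prime-osd-dir` repopulates the data directory before activation. The two stderr lines emitted during mkfs ("No valid bdev label found" and "_read_fsid unparsable uuid") are printed while the still-blank LV is probed before the BlueStore label has been written; the "lvm prepare successful" message that follows shows they are not failures in this run. A condensed sketch of the sequence, with placeholder values rather than values from this job (ceph-volume normally drives all of this itself):

  # Condensed BlueStore prepare sequence, mirroring the commands logged above.
  OSD_ID=0
  OSD_UUID=00000000-0000-0000-0000-000000000000   # placeholder
  OSD_DIR=/var/lib/ceph/osd/ceph-${OSD_ID}
  LV_PATH=/dev/ceph-example-vg/osd-block-${OSD_UUID}
  KEYRING=/var/lib/ceph/bootstrap-osd/ceph.keyring

  mount -t tmpfs tmpfs "$OSD_DIR"
  ln -s "$LV_PATH" "$OSD_DIR/block"
  ceph --cluster ceph --name client.bootstrap-osd --keyring "$KEYRING" \
      mon getmap -o "$OSD_DIR/activate.monmap"
  ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i "$OSD_ID" \
      --monmap "$OSD_DIR/activate.monmap" --osd-data "$OSD_DIR" \
      --osd-uuid "$OSD_UUID" --setuser ceph --setgroup ceph
  ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev "$LV_PATH" \
      --path "$OSD_DIR" --no-mon-config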
2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.3 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/ 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid b5510aec-ded8-47ec-b142-6350ab8d840a --setuser ceph --setgroup ceph 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:26.845+0000 7f95b881b740 -1 bluestore(/var/lib/ceph/osd/ceph-3//block) No valid bdev label found 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:27.106+0000 7f95b881b740 -1 bluestore(/var/lib/ceph/osd/ceph-3/) _read_fsid unparsable uuid 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme1n1 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-7f75f27f-9cda-41e1-8ef7-22bbfb61e44d/osd-block-b5510aec-ded8-47ec-b142-6350ab8d840a /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-05T23:47:40.195 
INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 3 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme1n1 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf /dev/nvme2n1 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme2n1" successfully created. 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf" successfully created 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d" created. 
2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d 2026-03-05T23:47:40.195 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.4 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/ 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d --setuser ceph --setgroup ceph 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:30.270+0000 7f92679f8740 -1 bluestore(/var/lib/ceph/osd/ceph-4//block) No valid bdev label found 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:30.536+0000 7f92679f8740 -1 bluestore(/var/lib/ceph/osd/ceph-4/) _read_fsid unparsable uuid 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme2n1 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-7ba6c494-0090-4cbf-9672-ab0cc9474baf/osd-block-ba9d4e7c-a5b3-47c8-905b-0cbf3d88925d /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-05T23:47:40.196 
INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 4 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme2n1 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 71a13a86-ff4a-4521-97c9-abda62c60a41 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1 /dev/nvme3n1 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Physical volume "/dev/nvme3n1" successfully created. 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Volume group "ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1" successfully created 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/lvcreate --yes -l 5119 -n osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stdout: Logical volume "osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41" created. 
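The same LVM-then-prepare pattern repeats across all four NVMe devices in the batch (osd.0, osd.3, osd.4 and, just below, osd.7). Not part of this run, but to cross-check the OSDs created so far from the host, commands along these lines would do it (exact invocations can differ between releases):

  # Hedged verification commands; not executed in this job.
  sudo cephadm shell -- ceph osd tree          # CRUSH view of the new OSDs
  sudo cephadm shell -- ceph osd metadata 0    # device and bluestore details for osd.0
  sudo cephadm ceph-volume lvm list            # VG/LV backing each local OSD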
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -s /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-7/activate.monmap 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: got monmap epoch 2 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Creating keyring file for osd.7 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/keyring 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/ 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 7 --monmap /var/lib/ceph/osd/ceph-7/activate.monmap --keyfile - --osdspec-affinity all-available-devices --osd-data /var/lib/ceph/osd/ceph-7/ --osd-uuid 71a13a86-ff4a-4521-97c9-abda62c60a41 --setuser ceph --setgroup ceph 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:33.946+0000 7fa75bb2c740 -1 bluestore(/var/lib/ceph/osd/ceph-7//block) No valid bdev label found 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: 2026-03-05T22:47:34.211+0000 7fa75bb2c740 -1 bluestore(/var/lib/ceph/osd/ceph-7/) _read_fsid unparsable uuid 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm prepare successful for: /dev/nvme3n1 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ln -snf /dev/ceph-0567598c-9291-43ec-8437-6cddeb2cb0d1/osd-block-71a13a86-ff4a-4521-97c9-abda62c60a41 /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-05T23:47:40.196 
INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm activate successful for osd ID: 7
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> ceph-volume lvm create successful for: /dev/nvme3n1
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph-authtool --gen-print-key
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4cf1abbb-f7a9-436e-881d-560a42db7e19
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: nsenter --mount=/rootfs/proc/1/ns/mnt --ipc=/rootfs/proc/1/ns/ipc --net=/rootfs/proc/1/ns/net --uts=/rootfs/proc/1/ns/uts /sbin/vgcreate --force --yes ceph-45cc0b30-ad10-40ad-a93b-6cbc33ae0bc0 /dev/vdb
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: Failed to read lvm info for /dev/vdb PVID RJZoaqboRH172Zw9Dg6qpIz8SdmQeLNC.
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Was unable to complete a new OSD, will rollback changes
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.8 --yes-i-really-mean-it
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr stderr: purged osd.8
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> No OSD identified by "8" was found among LVM-based OSDs.
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr --> Proceeding to check RAW-based OSDs.
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr No OSD were found.
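The batch run therefore stops at the fifth data device: `vgcreate` on /dev/vdb fails with "Failed to read lvm info for /dev/vdb PVID RJZoaqboRH172Zw9Dg6qpIz8SdmQeLNC.", ceph-volume rolls back the half-created OSD (`osd purge-new osd.8`), and the cephadm wrapper surfaces the non-zero exit as the RuntimeError in the traceback below. From here on, teuthology keeps polling `ceph osd stat -f json` and the cluster stays at 8 OSDs registered with none up. One plausible reading of the PVID message is a mismatch between LVM's devices file and what is actually on the disk (a stale PV header or stale devices-file entry); host-side checks along these lines on the affected node could confirm that, though none of them are part of this job:

  # Hedged diagnostics for the vgcreate failure on /dev/vdb; not run here.
  sudo pvs -a                    # is /dev/vdb already claimed as a PV?
  sudo lvmdevices                # entries in /etc/lvm/devices/system.devices
  sudo lvmdevices --check        # compare the devices file against the actual disks
  sudo blkid /dev/vdb            # any leftover signature on the device?
  # If the disk is known to be disposable scratch space, clearing stale
  # metadata before re-running the batch is one option (destructive):
  #   sudo wipefs -a /dev/vdb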
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Traceback (most recent call last):
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return _run_code(code, main_globals, None,
2026-03-05T23:47:40.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout: exec(code, run_globals)
2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in
2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main
2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config
2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid
2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image
2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid
2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume
2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws
2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpxs41r_4z:/etc/ceph/ceph.conf:z -v
/tmp/ceph-tmpko9me24s:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:47:40.197 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:39 vm02 ceph-mon[65842]: pgmap v30: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:40.821 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:41.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:40 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1704657757' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:41.183 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:41.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:40 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1704657757' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:41.600 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:41.667 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:41.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:41 vm02 ceph-mon[65842]: pgmap v31: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:41.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:41 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1896604107' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:42.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:41 vm09 ceph-mon[70730]: pgmap v31: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:42.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:41 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1896604107' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:42.668 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:43.049 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:43.389 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:43.468 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:44.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:43 vm09 ceph-mon[70730]: pgmap v32: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:44.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:43 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/1776413221' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:44.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:43 vm02 ceph-mon[65842]: pgmap v32: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:44.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:43 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1776413221' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:44.469 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:44.814 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:45.153 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:45.235 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:46.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:45 vm09 ceph-mon[70730]: pgmap v33: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:46.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:45 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/339980289' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:46.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:45 vm02 ceph-mon[65842]: pgmap v33: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:46.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:45 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/339980289' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:46.236 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:46.600 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:46.964 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:47.060 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:48.061 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:48.086 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:47 vm02 ceph-mon[65842]: pgmap v34: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:48.086 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:47 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1578133177' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:48.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:47 vm09 ceph-mon[70730]: pgmap v34: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:48.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:47 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1578133177' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:48.394 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:48.757 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:48.818 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:49.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:48 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2848226598' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:49.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:48 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2848226598' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:49.819 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:50.157 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:50.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:49 vm09 ceph-mon[70730]: pgmap v35: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:50.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:49 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:50.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:49 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:47:50.182 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:49 vm02 ceph-mon[65842]: pgmap v35: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:50.182 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:49 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:47:50.182 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:49 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:47:50.492 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:50.544 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:50.806 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:50 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2731694506' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:51.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:50 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2731694506' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:51.545 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:51.901 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:51.925 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:51 vm02 ceph-mon[65842]: pgmap v36: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:52.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:51 vm09 ceph-mon[70730]: pgmap v36: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:52.231 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:52.308 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:53.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:52 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/694497862' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:53.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:52 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/694497862' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:53.308 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:53.648 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:53.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:53 vm02 ceph-mon[65842]: pgmap v37: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:53.985 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:54.054 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:54.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:53 vm09 ceph-mon[70730]: pgmap v37: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:55.055 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:55.079 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:54 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3296198600' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:55.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:54 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3296198600' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:55.388 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:55.709 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:55.776 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:56.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:55 vm09 ceph-mon[70730]: pgmap v38: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:56.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:55 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/687056241' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:56.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:55 vm02 ceph-mon[65842]: pgmap v38: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:56.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:55 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/687056241' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:56.776 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:57.121 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:57.454 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:57.534 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:47:58.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:57 vm09 ceph-mon[70730]: pgmap v39: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:58.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:57 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1297040354' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:58.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:57 vm02 ceph-mon[65842]: pgmap v39: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:47:58.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:57 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1297040354' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:47:58.535 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:47:58.867 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:47:59.210 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:47:59.270 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:00.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:59 vm09 ceph-mon[70730]: pgmap v40: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:00.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:47:59 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2738544693' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:00.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:59 vm02 ceph-mon[65842]: pgmap v40: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:00.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:47:59 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2738544693' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:00.271 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:00.622 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:00.975 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:01.056 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:02.057 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:02.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:01 vm09 ceph-mon[70730]: pgmap v41: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:02.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:01 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1205488657' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:02.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:01 vm02 ceph-mon[65842]: pgmap v41: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:02.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:01 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1205488657' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:02.400 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:02.761 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:02.814 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:03.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:02 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3290392071' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:03.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:02 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3290392071' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:03.815 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:04.164 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:04.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:03 vm09 ceph-mon[70730]: pgmap v42: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:04.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:03 vm02 ceph-mon[65842]: pgmap v42: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:04.531 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:04.610 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:04.866 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:04 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:48:04.866 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:04 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3764604125' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:05.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:48:05.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:04 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3764604125' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:05.610 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:05.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:05 vm02 ceph-mon[65842]: pgmap v43: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:05.960 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:06.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:05 vm09 ceph-mon[70730]: pgmap v43: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:06.293 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:06.377 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:07.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:06 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/4250539757' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:07.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:06 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/4250539757' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:07.378 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:07.740 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:08.104 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:08.104 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:07 vm02 ceph-mon[65842]: pgmap v44: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:08.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:07 vm09 ceph-mon[70730]: pgmap v44: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:08.195 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:09.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:08 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2792957221' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:09.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:08 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2792957221' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:09.196 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:09.535 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:09.883 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:09.973 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:10.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:09 vm09 ceph-mon[70730]: pgmap v45: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:10.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:09 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/91891630' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:10.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:09 vm02 ceph-mon[65842]: pgmap v45: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:10.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:09 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/91891630' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:10.973 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:11.317 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:11.654 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:11.730 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:11.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:11 vm02 ceph-mon[65842]: pgmap v46: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:11.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:11 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3193717410' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:12.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:11 vm09 ceph-mon[70730]: pgmap v46: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:12.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:11 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3193717410' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:12.732 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:13.085 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:13.429 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:13.508 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:14.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:13 vm09 ceph-mon[70730]: pgmap v47: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:14.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:13 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2770833153' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:14.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:13 vm02 ceph-mon[65842]: pgmap v47: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:14.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:13 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2770833153' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:14.508 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:14.845 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:15.184 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:15.262 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:16.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:15 vm09 ceph-mon[70730]: pgmap v48: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:16.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:15 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1318656262' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:16.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:15 vm02 ceph-mon[65842]: pgmap v48: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:16.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:15 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1318656262' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:16.263 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:16.576 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:16.908 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:16.969 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:17.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:16 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1074878133' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:17.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:16 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1074878133' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:17.969 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:18.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:17 vm09 ceph-mon[70730]: pgmap v49: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:18.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:17 vm02 ceph-mon[65842]: pgmap v49: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:18.287 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:18.620 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:18.698 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:19.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:19 vm09 ceph-mon[70730]: pgmap v50: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:19.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:19 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1248846260' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:19.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:19 vm02 ceph-mon[65842]: pgmap v50: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:19.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:19 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1248846260' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:19.699 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:20.012 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:20 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:48:20.035 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:20.389 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:20.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:20 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:48:20.450 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:21.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:21 vm09 ceph-mon[70730]: pgmap v51: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:21.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:21 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/338056319' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:21.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:21 vm02 ceph-mon[65842]: pgmap v51: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:21.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:21 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/338056319' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:21.452 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:21.777 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:22.098 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:22.177 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:22.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:22 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/4283127171' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:22.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:22 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/4283127171' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:23.179 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:23.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:23 vm09 ceph-mon[70730]: pgmap v52: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:23.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:23 vm02 ceph-mon[65842]: pgmap v52: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:23.516 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:23.844 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:23.899 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:24.162 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:24 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/4026626914' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:24.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:24 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/4026626914' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:24.900 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:25.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:25 vm02 ceph-mon[65842]: pgmap v53: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:25.232 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:25.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:25 vm09 ceph-mon[70730]: pgmap v53: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:25.567 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:25.658 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:26.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:26 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/235502312' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:26.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:26 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/235502312' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:26.659 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:26.979 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:27.315 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:27 vm02 ceph-mon[65842]: pgmap v54: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:27.315 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:27.383 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:27.469 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:27 vm09 ceph-mon[70730]: pgmap v54: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:28.384 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:28.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:28 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/4200491736' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:28.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:28 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/4200491736' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:28.739 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:29.138 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:29.197 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:29.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:29 vm02 ceph-mon[65842]: pgmap v55: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:29.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:29 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2674114444' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:29.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:29 vm09 ceph-mon[70730]: pgmap v55: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:29.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:29 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/2674114444' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:30.197 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:30.534 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:30.888 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:30.966 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:31.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:31 vm09 ceph-mon[70730]: pgmap v56: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:31.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:31 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2613321534' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:31.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:31 vm02 ceph-mon[65842]: pgmap v56: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:31.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:31 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2613321534' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:31.967 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:32.321 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:32.650 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:32.706 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:33.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:33 vm09 ceph-mon[70730]: pgmap v57: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:33.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:33 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2550172334' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:33.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:33 vm02 ceph-mon[65842]: pgmap v57: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:33.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:33 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2550172334' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:33.707 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:34.069 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:34.416 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:34.468 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:35.469 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:35.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:35 vm09 ceph-mon[70730]: pgmap v58: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:35.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:35 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2796795368' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:35.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:35 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:48:35.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:35 vm02 ceph-mon[65842]: pgmap v58: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:35.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:35 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2796795368' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:35.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:35 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:48:35.839 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:36.174 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:36.249 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:36.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:36 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1200079327' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:36.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:36 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/1200079327' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:37.250 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:37.609 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:37.635 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:37 vm02 ceph-mon[65842]: pgmap v59: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:37.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:37 vm09 ceph-mon[70730]: pgmap v59: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:37.978 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:38.047 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:38.312 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:38 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2806275160' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:38.312 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:38 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:48:38.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:38 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2806275160' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:38.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:38 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:48:39.047 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:39.488 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:39.566 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:39 vm02 ceph-mon[65842]: pgmap v60: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:39.566 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:39 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:48:39.566 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:39 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:48:39.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:39 vm09 ceph-mon[70730]: pgmap v60: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:39.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:39 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:48:39.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:39 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' 
entity='mgr.vm02.trdlkm' 2026-03-05T23:48:39.889 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:39.976 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/666910010' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:48:40.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:40 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:48:40.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:48:40.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:48:40.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/666910010' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:40.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:48:40.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:48:40.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:48:40.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-05T23:48:40.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:48:40.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:48:40.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:48:40.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:40 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:48:40.976 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:41.355 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:41.768 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:41.851 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:42.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:41 vm09 ceph-mon[70730]: pgmap v61: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:42.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:41 vm09 ceph-mon[70730]: Health check cleared: CEPHADM_APPLY_SPEC_FAIL (was: Failed to apply 1 service(s): osd.all-available-devices) 2026-03-05T23:48:42.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:41 vm09 ceph-mon[70730]: Cluster is now healthy 2026-03-05T23:48:42.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:41 vm02 ceph-mon[65842]: pgmap v61: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:42.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:41 vm02 ceph-mon[65842]: Health check cleared: CEPHADM_APPLY_SPEC_FAIL (was: Failed to apply 1 service(s): 
osd.all-available-devices) 2026-03-05T23:48:42.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:41 vm02 ceph-mon[65842]: Cluster is now healthy 2026-03-05T23:48:42.741 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:42 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/155491722' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:42.851 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:43.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:42 vm09 ceph-mon[70730]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-05T23:48:43.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_id: all-available-devices 2026-03-05T23:48:43.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_name: osd.all-available-devices 2026-03-05T23:48:43.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout: placement: 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: host_pattern: '*' 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: spec: 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: data_devices: 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: all: true 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: filter_logic: AND 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: objectstore: bluestore 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm02 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpgf78adpj:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmppdjyho9i:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-05T23:48:43.181 
INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance.main() 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr 
File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 
2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-05T23:48:43.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm02 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpgf78adpj:/etc/ceph/ceph.conf:z -v 
/tmp/ceph-tmppdjyho9i:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: if self._apply_service(spec): 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in _apply_service 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return future.result(timeout) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.__get_result() 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise self._exception 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return await gather(*futures) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret_msg = await self.create_single_host( 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise RuntimeError( 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm02 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e 
CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpgf78adpj:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmppdjyho9i:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance.main() 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, 
in parse_args 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-05T23:48:43.182 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} 
has a filesystem.".format(self.dev_path)) 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm02 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v 
/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpgf78adpj:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmppdjyho9i:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:48:43.183 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:42 vm09 ceph-mon[70730]: pgmap v62: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:42 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/155491722' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:42 vm02 ceph-mon[65842]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: service_id: all-available-devices 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: service_name: osd.all-available-devices 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: placement: 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: host_pattern: '*' 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: spec: 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: data_devices: 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: all: true 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: filter_logic: AND 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: objectstore: bluestore 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm02 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpgf78adpj:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmppdjyho9i:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-05T23:48:43.191 
INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr instance.main() 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-05T23:48:43.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: 
/usr/bin/podman: stderr take_action(action, args) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 
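The ceph-volume traceback above is the root cause of the OSD deployment failure in this run: /dev/vdb still carries a filesystem signature from earlier use, so the device validator rejects it and the whole lvm batch call for osd.all-available-devices fails. A minimal cleanup sketch follows; the hostname (vm02) and device path (/dev/vdb) are taken from this log, the commands are standard cephadm-orchestrator and util-linux tooling, and this is only one possible way to recover, not something the test harness performs itself. It assumes any data left on the device is disposable.

  # Ask the orchestrator to wipe the device on the affected host;
  # --force is needed because a filesystem is present on it.
  ceph orch device zap vm02 /dev/vdb --force

  # Alternatively, clear the filesystem signatures directly on the host.
  sudo wipefs --all /dev/vdb

  # Re-scan and confirm the device is now reported as available.
  ceph orch device ls --refresh

Until the device is usable, the CEPHADM_APPLY_SPEC_FAIL health check seen further down remains set, and the repeated "ceph osd stat -f json" polls that follow keep reporting num_up_osds of 0.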
2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Traceback (most recent call last): 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: exec(code, run_globals) 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:48:43.192 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm02 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpgf78adpj:/etc/ceph/ceph.conf:z -v 
/tmp/ceph-tmppdjyho9i:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Traceback (most recent call last): 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: if self._apply_service(spec): 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in _apply_service 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return future.result(timeout) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return self.__get_result() 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: raise self._exception 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return await gather(*futures) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: ret_msg = await self.create_single_host( 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: raise RuntimeError( 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm02 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e 
CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpgf78adpj:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmppdjyho9i:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr instance.main() 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-05T23:48:43.193 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, 
in parse_args 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} 
has a filesystem.".format(self.dev_path)) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Traceback (most recent call last): 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: exec(code, run_globals) 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm02 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v 
/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpgf78adpj:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmppdjyho9i:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:48:43.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:42 vm02 ceph-mon[65842]: pgmap v62: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:43.221 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:43.574 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:43.633 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:43.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:43 vm02 ceph-mon[65842]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL) 2026-03-05T23:48:43.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:43 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1550878406' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:44.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:43 vm09 ceph-mon[70730]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL) 2026-03-05T23:48:44.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:43 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1550878406' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:44.635 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:44.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:44 vm02 ceph-mon[65842]: pgmap v63: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:45.001 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:45.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:44 vm09 ceph-mon[70730]: pgmap v63: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:45.367 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:45.574 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:46.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:45 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3101977890' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:46.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:45 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/3101977890' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:46.575 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:46.933 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:46.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:46 vm02 ceph-mon[65842]: pgmap v64: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:47.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:46 vm09 ceph-mon[70730]: pgmap v64: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:47.304 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:47.395 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:48.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:47 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2656247179' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:48.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:47 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2656247179' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:48.396 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:48.732 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:49.094 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:49.094 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:48 vm02 ceph-mon[65842]: pgmap v65: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:49.168 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:49.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:48 vm09 ceph-mon[70730]: pgmap v65: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:50.169 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:50.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:49 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3094291247' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:50.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:49 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:48:50.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:49 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3094291247' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:50.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:49 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:48:50.540 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:50.886 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:50 vm02 ceph-mon[65842]: pgmap v66: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:50.902 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:50.984 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:51.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:50 vm09 ceph-mon[70730]: pgmap v66: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:51.986 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:52.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:51 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2644654578' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:52.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:51 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2644654578' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:52.363 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:52.717 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:52.791 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:53.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:52 vm09 ceph-mon[70730]: pgmap v67: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:53.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:52 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2640342687' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:53.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:52 vm02 ceph-mon[65842]: pgmap v67: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:53.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:52 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2640342687' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:53.792 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:54.149 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:54.545 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:54.633 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:55.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:54 vm09 ceph-mon[70730]: pgmap v68: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:55.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:54 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1627532244' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:55.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:54 vm02 ceph-mon[65842]: pgmap v68: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:55.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:54 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1627532244' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:55.634 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:56.001 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:56.355 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:56.414 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:57.415 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:57.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:56 vm09 ceph-mon[70730]: pgmap v69: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:57.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:56 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/871706635' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:57.438 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:56 vm02 ceph-mon[65842]: pgmap v69: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:57.438 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:56 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/871706635' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:57.764 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:58.139 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:57 vm02 ceph-mon[65842]: pgmap v70: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:58.140 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:58.205 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:48:58.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:57 vm09 ceph-mon[70730]: pgmap v70: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:48:59.206 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:48:59.230 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:58 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2257410791' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:59.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:58 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2257410791' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:48:59.572 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:48:59.924 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:48:59.987 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:00.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:59 vm02 ceph-mon[65842]: pgmap v71: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:00.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:48:59 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/829297936' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:00.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:59 vm09 ceph-mon[70730]: pgmap v71: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:00.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:48:59 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/829297936' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:00.988 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:01.342 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:01.695 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:01.778 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:02.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:01 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/800097823' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:02.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:01 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/800097823' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:02.779 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:03.130 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:03.155 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:02 vm02 ceph-mon[65842]: pgmap v72: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:03.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:02 vm09 ceph-mon[70730]: pgmap v72: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:03.475 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:03.552 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:03.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:03 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2537548964' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:04.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:03 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2537548964' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:04.552 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:04.918 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:04.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:04 vm02 ceph-mon[65842]: pgmap v73: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:04.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:04 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:49:05.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:04 vm09 ceph-mon[70730]: pgmap v73: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:05.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:49:05.285 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:05.375 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:06.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:05 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3075017605' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:06.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:05 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3075017605' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:06.376 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:06.739 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:07.084 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:06 vm02 ceph-mon[65842]: pgmap v74: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:07.084 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:07.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:06 vm09 ceph-mon[70730]: pgmap v74: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:07.190 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:08.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:07 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/2847365429' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:08.190 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:08.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:07 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2847365429' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:08.562 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:08.914 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:08.914 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:08 vm02 ceph-mon[65842]: pgmap v75: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:08.998 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:09.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:08 vm09 ceph-mon[70730]: pgmap v75: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:09.999 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:10.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:09 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1282298584' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:10.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:09 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1282298584' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:10.369 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:10.718 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:10.794 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:11.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:10 vm09 ceph-mon[70730]: pgmap v76: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:11.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:10 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/33781298' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:11.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:10 vm02 ceph-mon[65842]: pgmap v76: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:11.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:10 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/33781298' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:11.795 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:12.170 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:12.522 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:12.608 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:12 vm09 ceph-mon[70730]: pgmap v77: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:12 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2574895756' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:13.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:12 vm02 ceph-mon[65842]: pgmap v77: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:13.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:12 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2574895756' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:13.608 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:13.992 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:14.353 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:14.422 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:15.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:14 vm09 ceph-mon[70730]: pgmap v78: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:15.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:14 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/912131386' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:15.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:14 vm02 ceph-mon[65842]: pgmap v78: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:15.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:14 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/912131386' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:15.423 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:15.773 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:16.116 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:16.198 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:17.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:16 vm09 ceph-mon[70730]: pgmap v79: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:17.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:16 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3052283681' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:17.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:16 vm02 ceph-mon[65842]: pgmap v79: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:17.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:16 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3052283681' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:17.199 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:17.544 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:17.899 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:17.969 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:18.970 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:19.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:18 vm09 ceph-mon[70730]: pgmap v80: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:19.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:18 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2760641255' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:19.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:18 vm02 ceph-mon[65842]: pgmap v80: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:19.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:18 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2760641255' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:19.316 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:19.669 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:19.728 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:20.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:19 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:49:20.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:19 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/720573918' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:20.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:19 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:49:20.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:19 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/720573918' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:20.729 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:21.071 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:21.095 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:20 vm02 ceph-mon[65842]: pgmap v81: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:21.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:20 vm09 ceph-mon[70730]: pgmap v81: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:21.412 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:21.464 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:22.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:21 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3873850076' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:22.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:21 vm02 ceph-mon[65842]: pgmap v82: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:22.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:21 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3873850076' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:22.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:21 vm09 ceph-mon[70730]: pgmap v82: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:22.464 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:22.800 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:23.139 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:23.214 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:23.430 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:23 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/530712621' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:23.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:23 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/530712621' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:24.214 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:24.553 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:24.580 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:24 vm02 ceph-mon[65842]: pgmap v83: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:24.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:24 vm09 ceph-mon[70730]: pgmap v83: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:24.895 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:24.970 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:25.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:25 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2365304655' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:25.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:25 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2365304655' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:25.971 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:26.269 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:26 vm02 ceph-mon[65842]: pgmap v84: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:26.314 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:26.662 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:26.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:26 vm09 ceph-mon[70730]: pgmap v84: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:26.731 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:27.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:27 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1928607051' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:27.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:27 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1928607051' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:27.731 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:28.067 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:28.397 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:28 vm02 ceph-mon[65842]: pgmap v85: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:28.397 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:28.448 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:28.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:28 vm09 ceph-mon[70730]: pgmap v85: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:29.449 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:29.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:29 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1527608644' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:29.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:29 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1527608644' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:29.807 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:30.161 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:30.292 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:30.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:30 vm02 ceph-mon[65842]: pgmap v86: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:30.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:30 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2241533294' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:30.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:30 vm09 ceph-mon[70730]: pgmap v86: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:30.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:30 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2241533294' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:31.293 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:31.639 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:32.006 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:32.057 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:33.058 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:33.084 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:32 vm02 ceph-mon[65842]: pgmap v87: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:33.084 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:32 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/4125413919' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:33.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:32 vm09 ceph-mon[70730]: pgmap v87: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:33.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:32 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/4125413919' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:33.432 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:33.773 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:33.850 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:34.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:33 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/4046677228' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:34.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:33 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/4046677228' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:34.851 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:35.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:34 vm09 ceph-mon[70730]: pgmap v88: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:35.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:34 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:49:35.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:34 vm02 ceph-mon[65842]: pgmap v88: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:35.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:34 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:49:35.202 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:35.534 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:35.612 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:35.875 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:35 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2894076081' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:36.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:35 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/2894076081' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:36.613 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:36.979 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:37.380 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:36 vm02 ceph-mon[65842]: pgmap v89: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:37.380 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:37.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:36 vm09 ceph-mon[70730]: pgmap v89: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:37.470 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:38.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:37 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/647933991' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:38.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:37 vm02 ceph-mon[65842]: pgmap v90: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:38.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:37 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/647933991' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:38.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:37 vm09 ceph-mon[70730]: pgmap v90: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:38.470 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:38.812 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:39.146 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:39.230 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:39.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:39 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/145101530' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:39.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:39 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/145101530' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:40.231 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:40.575 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:40.614 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:40 vm02 ceph-mon[65842]: pgmap v91: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:40.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:40 vm09 ceph-mon[70730]: pgmap v91: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:40.968 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:41.047 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:41.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:41 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/4134247824' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:41.680 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:41 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/4134247824' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:42.048 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:42.406 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:42 vm02 ceph-mon[65842]: pgmap v92: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:42.406 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:42 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:49:42.442 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:42.477 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:42 vm09 ceph-mon[70730]: pgmap v92: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:42.477 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:42 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:49:42.925 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:42.999 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:43.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:43 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:49:43.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:43 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:49:43.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:43 
vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/320765719' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:43.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:43 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:49:43.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:43 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:49:44.000 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:44.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:43 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:49:44.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:43 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:49:44.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:43 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/320765719' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:44.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:43 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:49:44.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:43 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:49:44.417 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:44.810 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:44.905 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:45.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:44 vm09 ceph-mon[70730]: pgmap v93: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:45.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:44 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:49:45.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:44 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:49:45.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:44 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:49:45.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:44 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-05T23:49:45.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:44 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:49:45.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:44 vm09 ceph-mon[70730]: 
from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:49:45.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:44 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:49:45.181 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:44 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:49:45.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:44 vm02 ceph-mon[65842]: pgmap v93: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:45.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:44 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:49:45.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:44 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:49:45.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:44 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:49:45.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:44 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-05T23:49:45.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:44 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:49:45.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:44 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:49:45.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:44 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:49:45.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:44 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:49:45.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:45 vm09 ceph-mon[70730]: pgmap v94: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:45.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:45 vm09 ceph-mon[70730]: Health check cleared: CEPHADM_APPLY_SPEC_FAIL (was: Failed to apply 1 service(s): osd.all-available-devices) 2026-03-05T23:49:45.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:45 vm09 ceph-mon[70730]: Cluster is now healthy 2026-03-05T23:49:45.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:45 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/2716816005' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:45.906 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:46.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:45 vm02 ceph-mon[65842]: pgmap v94: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:46.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:45 vm02 ceph-mon[65842]: Health check cleared: CEPHADM_APPLY_SPEC_FAIL (was: Failed to apply 1 service(s): osd.all-available-devices) 2026-03-05T23:49:46.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:45 vm02 ceph-mon[65842]: Cluster is now healthy 2026-03-05T23:49:46.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:45 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2716816005' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:46.263 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:46.607 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:46.675 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:46.926 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:46 vm09 ceph-mon[70730]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_id: all-available-devices 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: service_name: osd.all-available-devices 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: placement: 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: host_pattern: '*' 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: spec: 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: data_devices: 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: all: true 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: filter_logic: AND 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: objectstore: bluestore 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v 
/sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Traceback (most recent call last):
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in <module>
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')())
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.main(self.argv)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return f(*a, **kw)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance.main()
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:])
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr take_action(action, args)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings]
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in <listcomp>
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings]
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr result = type_func(arg_string)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device())
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False)
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device()
2026-03-05T23:49:46.927 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device
2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path))
2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem.
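
The ceph-volume stderr above is why the osd.all-available-devices spec fails on vm09: the device validator rejects /dev/vdb because it still carries a filesystem signature, which aborts the whole `lvm batch` call across all eight devices. A minimal manual check-and-clean sequence is sketched below; host vm09 and device /dev/vdb come from the log, while the commands themselves are an illustrative sketch that assumes the data on the device is disposable, as it is in this QA run.

    # Sketch only -- not part of the teuthology run recorded above.
    # Show the filesystem signature that makes ceph-volume treat the device as unusable.
    sudo lsblk -f /dev/vdb
    # Ask the orchestrator why the device is not listed as available.
    sudo cephadm shell -- ceph orch device ls vm09 --refresh
    # Wipe the device through the orchestrator so the OSD spec can consume it
    # (--force is required because zapping is destructive).
    sudo cephadm shell -- ceph orch device zap vm09 /dev/vdb --force
    # Alternative: clear the signatures directly and wait for the next cephadm device refresh.
    sudo wipefs --all /dev/vdb
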
2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v 
/tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: if self._apply_service(spec): 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in _apply_service 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return future.result(timeout) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return self.__get_result() 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise self._exception 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return await gather(*futures) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: ret_msg = await self.create_single_host( 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: raise RuntimeError( 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e 
CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance.main() 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-05T23:49:46.928 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, 
in parse_args 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} 
has a filesystem.".format(self.dev_path)) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: Traceback (most recent call last): 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: exec(code, run_globals) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v 
/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:46 vm09 ceph-mon[70730]: pgmap v95: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:46 vm09 ceph-mon[70730]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL) 2026-03-05T23:49:46.929 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:46 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3576055671' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:46 vm02 ceph-mon[65842]: Failed to apply osd.all-available-devices spec DriveGroupSpec.from_json(yaml.safe_load('''service_type: osd 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: service_id: all-available-devices 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: service_name: osd.all-available-devices 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: placement: 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: host_pattern: '*' 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: spec: 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: data_devices: 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: all: true 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: filter_logic: AND 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: objectstore: bluestore 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: ''')): cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 
/dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-05T23:49:46.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr instance.main() 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-05T23:49:46.942 
INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 
2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Traceback (most recent call last): 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: exec(code, run_globals) 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:49:46.942 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v 
/tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Traceback (most recent call last): 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 602, in _apply_all_services 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: if self._apply_service(spec): 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 721, in _apply_service 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: self.mgr.osd_service.create_from_spec(cast(DriveGroupSpec, spec)) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 79, in create_from_spec 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: ret = self.mgr.wait_async(all_hosts()) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return future.result(timeout) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return self.__get_result() 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: raise self._exception 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 76, in all_hosts 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return await gather(*futures) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 63, in create_from_spec_one 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: ret_msg = await self.create_single_host( 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/share/ceph/mgr/cephadm/services/osd.py", line 95, in create_single_host 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: raise RuntimeError( 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e 
CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr self.main(self.argv) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr instance.main() 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, 
in parse_args 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr take_action(action, args) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-05T23:49:46.943 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr raise RuntimeError("Device {} 
has a filesystem.".format(self.dev_path)) 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: Traceback (most recent call last): 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: return _run_code(code, main_globals, None, 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: exec(code, run_globals) 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v 
/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:46 vm02 ceph-mon[65842]: pgmap v95: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:46 vm02 ceph-mon[65842]: Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL) 2026-03-05T23:49:46.944 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:46 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3576055671' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:47.676 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:48.057 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:48.420 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:48.479 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:49.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:48 vm09 ceph-mon[70730]: pgmap v96: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:49.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:48 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2242700841' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:49.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:48 vm02 ceph-mon[65842]: pgmap v96: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:49.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:48 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2242700841' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:49.480 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:49.802 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:50.159 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:49 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:49:50.159 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:50.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:49 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:49:50.235 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:51.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:50 vm09 ceph-mon[70730]: pgmap v97: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:51.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:50 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/744334049' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:51.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:50 vm02 ceph-mon[65842]: pgmap v97: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:51.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:50 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/744334049' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:51.235 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:51.583 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:51.938 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:52.029 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:53.029 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:53.093 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:52 vm02 ceph-mon[65842]: pgmap v98: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:53.093 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:52 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/641237774' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:53.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:52 vm09 ceph-mon[70730]: pgmap v98: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:53.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:52 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/641237774' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:53.400 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:53.781 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:53.927 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:54.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:53 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1933564914' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:54.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:53 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1933564914' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:54.928 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:55.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:54 vm09 ceph-mon[70730]: pgmap v99: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:55.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:54 vm02 ceph-mon[65842]: pgmap v99: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:55.278 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:55.625 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:55.958 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:56.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:56 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3206148753' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:56.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:56 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3206148753' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:56.959 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:57.299 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:57.324 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:57 vm02 ceph-mon[65842]: pgmap v100: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:57.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:57 vm09 ceph-mon[70730]: pgmap v100: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:57.642 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:57.818 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:58.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:58 vm02 ceph-mon[65842]: pgmap v101: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:58.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:58 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/326655918' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:58.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:58 vm09 ceph-mon[70730]: pgmap v101: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:49:58.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:58 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/326655918' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:58.819 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:49:59.156 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:49:59.489 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:49:59.541 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:49:59.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:49:59 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/4212849375' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:49:59.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:49:59 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/4212849375' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:00.542 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:00.876 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: pgmap v102: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: Health detail: HEALTH_WARN Failed to apply 1 service(s): osd.all-available-devices 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: [WRN] CEPHADM_APPLY_SPEC_FAIL: Failed to apply 1 service(s): osd.all-available-devices 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: osd.all-available-devices: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-05T23:50:00.898 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr self.main(self.argv) 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr instance.main() 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-05T23:50:00.898 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 
2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr take_action(action, args) 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 99, in _is_valid_device 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 
23:50:00 vm02 ceph-mon[65842]: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: Traceback (most recent call last): 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: return _run_code(code, main_globals, None, 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: exec(code, run_globals) 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:50:00.899 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:00 vm02 ceph-mon[65842]: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e 
CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: pgmap v102: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: Health detail: HEALTH_WARN Failed to apply 1 service(s): osd.all-available-devices 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: [WRN] CEPHADM_APPLY_SPEC_FAIL: Failed to apply 1 service(s): osd.all-available-devices 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: osd.all-available-devices: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm09/config 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: Non-zero exit code 1 from /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr Traceback (most recent call last): 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/sbin/ceph-volume", line 33, in 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr 
sys.exit(load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 54, in __init__ 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr self.main(self.argv) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/decorators.py", line 59, in newfunc 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr return f(*a, **kw) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/main.py", line 166, in main 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr terminal.dispatch(self.mapper, subcommand_args) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 194, in dispatch 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr instance.main() 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/main.py", line 47, in main 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr terminal.dispatch(self.mapper, self.argv) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/terminal.py", line 192, in dispatch 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr instance = mapper.get(arg)(argv[count:]) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/devices/lvm/batch.py", line 281, in __init__ 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr self.args = parser.parse_args(argv) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1825, in parse_args 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr args, argv = self.parse_known_args(args, namespace) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1858, in parse_known_args 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr namespace, args = self._parse_known_args(args, namespace) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 
ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2049, in _parse_known_args 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr positionals_end_index = consume_positionals(start_index) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2026, in consume_positionals 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr take_action(action, args) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 1919, in take_action 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr argument_values = self._get_values(action, argument_strings) 2026-03-05T23:50:00.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in _get_values 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2468, in 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr value = [self._get_value(action, v) for v in arg_strings] 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib64/python3.9/argparse.py", line 2483, in _get_value 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr result = type_func(arg_string) 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 143, in __call__ 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr return self._format_device(self._is_valid_device()) 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 154, in _is_valid_device 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr super()._is_valid_device(raise_sys_exit=False) 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", line 131, in _is_valid_device 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr super()._is_valid_device() 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr File "/usr/lib/python3.9/site-packages/ceph_volume/util/arg_validators.py", 
line 99, in _is_valid_device 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr raise RuntimeError("Device {} has a filesystem.".format(self.dev_path)) 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: /usr/bin/podman: stderr RuntimeError: Device /dev/vdb has a filesystem. 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: Traceback (most recent call last): 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: return _run_code(code, main_globals, None, 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: exec(code, run_globals) 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5581, in <module> 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 5569, in main 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 409, in _infer_config 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 324, in _infer_fsid 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 437, in _infer_image 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 311, in _validate_fsid 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/__main__.py", line 3314, in command_ceph_volume 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: File "/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/cephadm.e5709e8f833aedcf34ee03465f2bbcfee475fbe4a3345dc4ee57d39fedc858ee/cephadmlib/call_wrappers.py", line 310, in call_throws 2026-03-05T23:50:00.931 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:00 vm09 ceph-mon[70730]: RuntimeError: Failed command: /usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM
--net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b -e NODE_NAME=vm09 -e CEPH_VOLUME_OSDSPEC_AFFINITY=all-available-devices -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/run/ceph:z -v /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97:/var/log/ceph:z -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmp8jgw7gzr:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpo6tn6bhv:/var/lib/ceph/bootstrap-osd/ceph.keyring:z harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b lvm batch --no-auto /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/vdb /dev/vdc /dev/vdd /dev/vde --yes --no-systemd 2026-03-05T23:50:01.206 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:01.261 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:01.552 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:01 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3456823586' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:01.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:01 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3456823586' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:02.262 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:02.558 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:02 vm02 ceph-mon[65842]: pgmap v103: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:02.602 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:02.924 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:02.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:02 vm09 ceph-mon[70730]: pgmap v103: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:02.991 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:03.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:03 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2040970394' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:03.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:03 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2040970394' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:03.992 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:04.311 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:04.653 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:04.724 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:04.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:04 vm09 ceph-mon[70730]: pgmap v104: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:04.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:04 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:50:04.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:04 vm02 ceph-mon[65842]: pgmap v104: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:04.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:04 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:50:05.725 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:05.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:05 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/181177962' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:05.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:05 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/181177962' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:06.064 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:06.390 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:06.462 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:06.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:06 vm02 ceph-mon[65842]: pgmap v105: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:06.691 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:06 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3141534807' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:06.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:06 vm09 ceph-mon[70730]: pgmap v105: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:06.930 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:06 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3141534807' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:07.463 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:07.799 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:08.135 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:08.214 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:08.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:08 vm02 ceph-mon[65842]: pgmap v106: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:08.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:08 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/95886375' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:09.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:08 vm09 ceph-mon[70730]: pgmap v106: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:09.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:08 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/95886375' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:09.215 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:09.547 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:09.877 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:09.951 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:10.952 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:11.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:10 vm09 ceph-mon[70730]: pgmap v107: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:11.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:10 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/62655551' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:11.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:10 vm02 ceph-mon[65842]: pgmap v107: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:11.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:10 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/62655551' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:11.289 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:11.619 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:11.671 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:11.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:11 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2334079233' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:12.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:11 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2334079233' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:12.671 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:13.012 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:13.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:12 vm02 ceph-mon[65842]: pgmap v108: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:13.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:12 vm09 ceph-mon[70730]: pgmap v108: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:13.338 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:13.412 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:14.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:13 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2208017988' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:14.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:13 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/2208017988' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:14.413 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:14.732 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:15.058 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:15.058 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:14 vm02 ceph-mon[65842]: pgmap v109: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:15.136 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:15.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:14 vm09 ceph-mon[70730]: pgmap v109: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:16.137 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:16.159 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:15 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1184761314' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:16.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:15 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1184761314' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:16.464 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:16.795 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:16.865 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:17.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:16 vm09 ceph-mon[70730]: pgmap v110: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:17.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:16 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3590867720' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:17.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:16 vm02 ceph-mon[65842]: pgmap v110: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:17.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:16 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/3590867720' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:17.866 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:18.194 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:18.545 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:18.602 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:19.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:18 vm09 ceph-mon[70730]: pgmap v111: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:19.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:18 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2976968154' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:19.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:18 vm02 ceph-mon[65842]: pgmap v111: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:19.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:18 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2976968154' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:19.603 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:19.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:19 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:50:19.956 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:20.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:19 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:50:20.324 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:20.401 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:21.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:20 vm09 ceph-mon[70730]: pgmap v112: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:21.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:20 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3086163332' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:21.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:20 vm02 ceph-mon[65842]: pgmap v112: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:21.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:20 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/3086163332' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:21.401 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:21.736 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:22.063 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:22.139 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:23.140 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:23.162 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:22 vm02 ceph-mon[65842]: pgmap v113: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:23.163 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:22 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3026695794' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:23.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:22 vm09 ceph-mon[70730]: pgmap v113: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:23.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:22 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3026695794' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:23.460 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:23.801 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:23.860 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:24.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:23 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1202106794' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:24.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:23 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1202106794' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:24.861 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:25.141 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:24 vm02 ceph-mon[65842]: pgmap v114: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:25.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:24 vm09 ceph-mon[70730]: pgmap v114: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:25.203 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:25.554 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:25.619 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:26.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:25 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2778996500' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:26.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:25 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2778996500' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:26.620 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:26.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:26 vm02 ceph-mon[65842]: pgmap v115: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:26.944 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:27.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:26 vm09 ceph-mon[70730]: pgmap v115: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:27.278 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:27.352 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:28.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:27 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/4030517806' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:28.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:27 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/4030517806' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:28.353 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:28.674 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:28.977 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:28 vm02 ceph-mon[65842]: pgmap v116: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:28.977 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:29.035 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:29.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:28 vm09 ceph-mon[70730]: pgmap v116: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:30.035 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:30.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:29 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3668837643' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:30.191 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:29 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3668837643' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:30.350 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:30.682 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:30.745 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:30.940 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:30 vm02 ceph-mon[65842]: pgmap v117: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:30.941 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:30 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3892635735' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:31.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:30 vm09 ceph-mon[70730]: pgmap v117: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:31.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:30 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/3892635735' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:31.745 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:32.078 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:32.392 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:32.449 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:33.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:32 vm09 ceph-mon[70730]: pgmap v118: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:33.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:32 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/3529917623' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:33.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:32 vm02 ceph-mon[65842]: pgmap v118: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:33.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:32 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/3529917623' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:33.450 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:33.774 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:34.100 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:34.167 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:35.168 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:35.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:34 vm09 ceph-mon[70730]: pgmap v119: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:35.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:34 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/1999996332' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:35.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:34 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:50:35.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:34 vm02 ceph-mon[65842]: pgmap v119: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:35.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:34 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1999996332' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:35.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:34 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-05T23:50:35.491 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:35.824 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:35.894 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:36.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:35 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2164578957' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:36.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:35 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2164578957' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:36.894 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:37.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:36 vm09 ceph-mon[70730]: pgmap v120: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:37.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:36 vm02 ceph-mon[65842]: pgmap v120: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:37.234 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:37.559 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:37.612 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:37.930 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:37 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2177979108' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:38.180 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:37 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/2177979108' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:38.613 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:38.933 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:38 vm02 ceph-mon[65842]: pgmap v121: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:38.955 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:39.272 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:39.343 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:39.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:38 vm09 ceph-mon[70730]: pgmap v121: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:40.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:39 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1083125176' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:40.343 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:40.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:39 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1083125176' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:40.667 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:40.988 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:41.036 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:41.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:40 vm09 ceph-mon[70730]: pgmap v122: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:41.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:40 vm02 ceph-mon[65842]: pgmap v122: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:42.036 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:42.357 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:42.381 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:41 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/508916698' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:42.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:41 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/508916698' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:42.674 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:42.738 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:43.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:42 vm09 ceph-mon[70730]: pgmap v123: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:43.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:42 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/1779936085' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:43.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:42 vm02 ceph-mon[65842]: pgmap v123: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:43.441 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:42 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/1779936085' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:43.739 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:44.059 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:44.081 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:43 vm02 ceph-mon[65842]: pgmap v124: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:44.383 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:44.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:43 vm09 ceph-mon[70730]: pgmap v124: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:44.453 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:45.430 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:44 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/665218247' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:45.440 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:44 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/665218247' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:45.453 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:45.793 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:46.162 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:45 vm02 ceph-mon[65842]: pgmap v125: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:46.162 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:45 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:50:46.164 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:46.222 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:46.305 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:45 vm09 ceph-mon[70730]: pgmap v125: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:46.305 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:45 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-05T23:50:47.222 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 -- ceph osd stat -f json 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='client.? 
192.168.123.102:0/1631806226' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: pgmap v126: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:50:47.230 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:46 vm09 ceph-mon[70730]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='client.? 
192.168.123.102:0/1631806226' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: pgmap v126: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-05T23:50:47.235 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:46 vm02 ceph-mon[65842]: from='mgr.14227 192.168.123.102:0/254385904' entity='mgr.vm02.trdlkm' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-05T23:50:47.585 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/mon.vm02/config 2026-03-05T23:50:47.922 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-05T23:50:48.016 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":17,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772750857,"num_remapped_pgs":0} 2026-03-05T23:50:48.017 ERROR:teuthology.contextutil:Saw exception from nested tasks Traceback (most recent call last): File "/home/teuthos/teuthology/teuthology/contextutil.py", line 30, in nested vars.append(enter()) File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 135, in __enter__ return next(self.gen) File "/home/teuthos/src/github.com_kshtsk_ceph_e50baef5944c0b5e8e734db1c467f1f19415a932/qa/tasks/cephadm.py", line 1128, in ceph_osds while proceed(): File "/home/teuthos/teuthology/teuthology/contextutil.py", line 134, in __call__ raise MaxWhileTries(error_msg) 
teuthology.exceptions.MaxWhileTries: reached maximum tries (120) after waiting for 120 seconds 2026-03-05T23:50:48.017 DEBUG:teuthology.orchestra.run.vm02:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-05T23:50:48.083 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-05T23:50:48.115 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-05T23:50:48.115 DEBUG:teuthology.orchestra.run.vm02:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-05T23:50:48.138 DEBUG:teuthology.orchestra.run.vm09:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-05T23:50:48.172 INFO:tasks.cephadm:Stopping all daemons... 2026-03-05T23:50:48.172 INFO:tasks.cephadm.mon.vm02:Stopping mon.vm02... 2026-03-05T23:50:48.173 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm02 2026-03-05T23:50:48.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:47 vm02 ceph-mon[65842]: Health check cleared: CEPHADM_APPLY_SPEC_FAIL (was: Failed to apply 1 service(s): osd.all-available-devices) 2026-03-05T23:50:48.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:47 vm02 ceph-mon[65842]: Cluster is now healthy 2026-03-05T23:50:48.190 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:47 vm02 ceph-mon[65842]: from='client.? 192.168.123.102:0/2013307649' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:48.227 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:47 vm09 ceph-mon[70730]: Health check cleared: CEPHADM_APPLY_SPEC_FAIL (was: Failed to apply 1 service(s): osd.all-available-devices) 2026-03-05T23:50:48.227 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:47 vm09 ceph-mon[70730]: Cluster is now healthy 2026-03-05T23:50:48.227 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:47 vm09 ceph-mon[70730]: from='client.? 192.168.123.102:0/2013307649' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-05T23:50:48.453 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:48 vm02 systemd[1]: Stopping Ceph mon.vm02 for e1ad3122-18e4-11f1-9926-f7644c158a97... 
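The teardown that begins here is the direct consequence of the ceph-volume failure logged earlier: the argument validator for `lvm batch` rejected /dev/vdb on vm09 with "Device /dev/vdb has a filesystem.", so the osd.all-available-devices spec could never be applied (the CEPHADM_APPLY_SPEC_FAIL health check above) and no OSD daemon ever came up. A minimal sketch, assuming only a standard lsblk on the test host, of how one might list the devices that would trip that validator; the helper is illustrative and is not part of cephadm, ceph-volume, or teuthology:

    import json
    import subprocess

    def devices_with_filesystems():
        # Mirror the condition ceph-volume raised on: a block device that still
        # carries a filesystem signature is not usable for a new OSD.
        out = subprocess.check_output(["lsblk", "-J", "-o", "NAME,FSTYPE"], text=True)
        devices = json.loads(out)["blockdevices"]
        return ["/dev/" + dev["name"] for dev in devices if dev.get("fstype")]

    if __name__ == "__main__":
        # On vm09 in this run the output would be expected to include /dev/vdb.
        print(devices_with_filesystems())

On a disposable test VM the usual remedy is to wipe the leftover signature (for example with wipefs -a /dev/vdb, or ceph orch device zap vm09 /dev/vdb --force) before the OSD spec is reapplied.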
2026-03-05T23:50:48.453 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:48 vm02 ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm02[65838]: 2026-03-05T22:50:48.314+0000 7faf6b397640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm02 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-05T23:50:48.453 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:48 vm02 ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm02[65838]: 2026-03-05T22:50:48.314+0000 7faf6b397640 -1 mon.vm02@0(leader) e2 *** Got Signal Terminated *** 2026-03-05T23:50:48.453 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 05 23:50:48 vm02 podman[100628]: 2026-03-05 23:50:48.434371269 +0100 CET m=+0.158508097 container died 86749b4a911d31c1ee67f3b6ce2e8713df6af06acee4ac06ba4c3bf74401959b (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3, name=ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm02, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-47-gc24117fd552, CEPH_SHA1=c24117fd5525679b799527bc1bd1f1dd0a2db5e2, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-05T23:50:48.528 DEBUG:teuthology.orchestra.run.vm02:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm02.service' 2026-03-05T23:50:48.599 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-05T23:50:48.599 INFO:tasks.cephadm.mon.vm02:Stopped mon.vm02 2026-03-05T23:50:48.599 INFO:tasks.cephadm.mon.vm09:Stopping mon.vm09... 2026-03-05T23:50:48.599 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm09 2026-03-05T23:50:48.880 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:48 vm09 systemd[1]: Stopping Ceph mon.vm09 for e1ad3122-18e4-11f1-9926-f7644c158a97... 
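The long run of `ceph osd stat -f json` calls above is the cephadm task waiting for the OSDs to report up; because the spec never applied, num_up_osds stayed at 0 while num_osds read 8, and the `while proceed()` loop in ceph_osds (qa/tasks/cephadm.py) gave up with MaxWhileTries after 120 tries (about 120 seconds), which is what triggers the daemon shutdown seen here. A rough sketch of that kind of bounded polling loop, assuming a `ceph` client on PATH; this is not the actual teuthology.contextutil or tasks.cephadm code:

    import json
    import subprocess
    import time

    class MaxWhileTries(Exception):
        """Raised when a bounded wait loop runs out of attempts."""

    def wait_for_up_osds(expected, tries=120, sleep=1.0):
        # Poll `ceph osd stat -f json` until enough OSDs are up, or give up.
        for _ in range(tries):
            stat = json.loads(subprocess.check_output(
                ["ceph", "osd", "stat", "-f", "json"], text=True))
            if stat.get("num_up_osds", 0) >= expected:
                return
            time.sleep(sleep)
        raise MaxWhileTries(
            f"reached maximum tries ({tries}) after waiting for {int(tries * sleep)} seconds")

    # Example: wait_for_up_osds(8) would raise here, since num_up_osds never left 0.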
2026-03-05T23:50:48.880 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:48 vm09 ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm09[70726]: 2026-03-05T22:50:48.693+0000 7f49c842a640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm09 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-05T23:50:48.880 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:48 vm09 ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm09[70726]: 2026-03-05T22:50:48.693+0000 7f49c842a640 -1 mon.vm09@1(peon) e2 *** Got Signal Terminated ***
2026-03-05T23:50:48.880 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:48 vm09 podman[78599]: 2026-03-05 23:50:48.816948103 +0100 CET m=+0.136030029 container died 1b91af5f8dfd6eaea3878697e2f1ae95100c47b8c8923d9f207da475a9d2834e (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b, name=ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm09, CEPH_REF=19.2.3-47-gc24117fd552, CEPH_SHA1=c24117fd5525679b799527bc1bd1f1dd0a2db5e2, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git)
2026-03-05T23:50:48.880 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:48 vm09 podman[78599]: 2026-03-05 23:50:48.834343112 +0100 CET m=+0.153425029 container remove 1b91af5f8dfd6eaea3878697e2f1ae95100c47b8c8923d9f207da475a9d2834e (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:26363c7a4eea9ef5a0148afc7b2a22b6f486596d87a30c2a9fdcda5db3eca62b, name=ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm09, CEPH_REF=19.2.3-47-gc24117fd552, CEPH_SHA1=c24117fd5525679b799527bc1bd1f1dd0a2db5e2, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git)
2026-03-05T23:50:48.880 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Mar 05 23:50:48 vm09 bash[78599]: ceph-e1ad3122-18e4-11f1-9926-f7644c158a97-mon-vm09
2026-03-05T23:50:48.887 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e1ad3122-18e4-11f1-9926-f7644c158a97@mon.vm09.service'
2026-03-05T23:50:48.917 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-05T23:50:48.917 INFO:tasks.cephadm.mon.vm09:Stopped mon.vm09
2026-03-05T23:50:48.917 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 --force --keep-logs
2026-03-05T23:50:49.190 INFO:teuthology.orchestra.run.vm02.stdout:Deleting cluster with fsid: e1ad3122-18e4-11f1-9926-f7644c158a97
2026-03-05T23:50:52.214 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 --force --keep-logs
2026-03-05T23:50:52.493 INFO:teuthology.orchestra.run.vm09.stdout:Deleting cluster with fsid: e1ad3122-18e4-11f1-9926-f7644c158a97
2026-03-05T23:50:54.566 DEBUG:teuthology.orchestra.run.vm02:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-05T23:50:54.593 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-05T23:50:54.619 INFO:tasks.cephadm:Archiving crash dumps...
2026-03-05T23:50:54.619 DEBUG:teuthology.misc:Transferring archived files from vm02:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash to /archive/irq0-2026-03-05_18:18:00-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/100/remote/vm02/crash
2026-03-05T23:50:54.619 DEBUG:teuthology.orchestra.run.vm02:> sudo tar c -f - -C /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash -- .
2026-03-05T23:50:54.660 INFO:teuthology.orchestra.run.vm02.stderr:tar: /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash: Cannot open: No such file or directory
2026-03-05T23:50:54.661 INFO:teuthology.orchestra.run.vm02.stderr:tar: Error is not recoverable: exiting now
2026-03-05T23:50:54.662 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash to /archive/irq0-2026-03-05_18:18:00-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/100/remote/vm09/crash
2026-03-05T23:50:54.662 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash -- .
2026-03-05T23:50:54.687 INFO:teuthology.orchestra.run.vm09.stderr:tar: /var/lib/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/crash: Cannot open: No such file or directory
2026-03-05T23:50:54.687 INFO:teuthology.orchestra.run.vm09.stderr:tar: Error is not recoverable: exiting now
2026-03-05T23:50:54.688 INFO:tasks.cephadm:Checking cluster log for badness...
2026-03-05T23:50:54.688 DEBUG:teuthology.orchestra.run.vm02:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v OSD_DOWN | egrep -v CEPHADM_FAILED_DAEMON | egrep -v 'but is still running' | egrep -v PG_DEGRADED | head -n 1
2026-03-05T23:50:54.732 INFO:teuthology.orchestra.run.vm02.stdout:2026-03-05T22:47:38.385867+0000 mon.vm02 (mon.0) 504 : cluster [WRN] Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL)
2026-03-05T23:50:54.733 WARNING:tasks.cephadm:Found errors (ERR|WRN|SEC) in cluster log
2026-03-05T23:50:54.733 DEBUG:teuthology.orchestra.run.vm02:> sudo egrep '\[SEC\]' /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v OSD_DOWN | egrep -v CEPHADM_FAILED_DAEMON | egrep -v 'but is still running' | egrep -v PG_DEGRADED | head -n 1
2026-03-05T23:50:54.805 DEBUG:teuthology.orchestra.run.vm02:> sudo egrep '\[ERR\]' /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v OSD_DOWN | egrep -v CEPHADM_FAILED_DAEMON | egrep -v 'but is still running' | egrep -v PG_DEGRADED | head -n 1
2026-03-05T23:50:54.878 DEBUG:teuthology.orchestra.run.vm02:> sudo egrep '\[WRN\]' /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v OSD_DOWN | egrep -v CEPHADM_FAILED_DAEMON | egrep -v 'but is still running' | egrep -v PG_DEGRADED | head -n 1
2026-03-05T23:50:54.964 INFO:teuthology.orchestra.run.vm02.stdout:2026-03-05T22:47:38.385867+0000 mon.vm02 (mon.0) 504 : cluster [WRN] Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL)
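[Editor's note] The "Checking cluster log for badness" step above is what turns this run into a failure: the first [ERR]/[WRN]/[SEC] line that matches the job's log-only-match pattern and none of its ignore patterns becomes the offending entry. A minimal Python sketch of the same filtering logic follows; the patterns are copied verbatim from the egrep pipeline above, while the function name first_bad_cluster_log_line and the file path are illustrative, not teuthology's API.

import re

# Patterns taken from the egrep pipeline in the log above.
SEVERITY = re.compile(r'\[ERR\]|\[WRN\]|\[SEC\]')
ONLY_MATCH = re.compile(r'CEPHADM_')
IGNORE = [re.compile(p) for p in (
    r'\(MDS_ALL_DOWN\)',
    r'\(MDS_UP_LESS_THAN_MAX\)',
    r'OSD_DOWN',
    r'CEPHADM_FAILED_DAEMON',
    r'but is still running',
    r'PG_DEGRADED',
)]

def first_bad_cluster_log_line(path: str) -> str | None:
    """Return the first ERR/WRN/SEC line matching CEPHADM_ that is not
    covered by an ignore pattern -- the `egrep ... | head -n 1` pipeline."""
    with open(path, encoding='utf-8', errors='replace') as f:
        for line in f:
            if not SEVERITY.search(line):
                continue
            if not ONLY_MATCH.search(line):
                continue
            if any(p.search(line) for p in IGNORE):
                continue
            return line.rstrip('\n')
    return None

if __name__ == '__main__':
    # CEPHADM_APPLY_SPEC_FAIL is not on the ignore list, so the [WRN] line
    # reported above survives the filters and is returned here.
    print(first_bad_cluster_log_line('/var/log/ceph/ceph.log'))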
2026-03-05T23:50:54.964 INFO:tasks.cephadm:Compressing logs...
2026-03-05T23:50:54.964 DEBUG:teuthology.orchestra.run.vm02:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-05T23:50:54.967 DEBUG:teuthology.orchestra.run.vm09:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-05T23:50:54.992 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-05T23:50:54.992 INFO:teuthology.orchestra.run.vm09.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-03-05T23:50:54.995 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-volume.log
2026-03-05T23:50:54.995 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/cephadm.log: 92.2% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-05T23:50:54.997 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-client.ceph-exporter.vm09.log
2026-03-05T23:50:54.997 INFO:teuthology.orchestra.run.vm09.stderr:gzip/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-volume.log: -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mgr.vm09.fivqds.log
2026-03-05T23:50:54.997 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-client.ceph-exporter.vm09.log: 30.4% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-client.ceph-exporter.vm09.log.gz
2026-03-05T23:50:55.000 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mon.vm09.log
2026-03-05T23:50:55.002 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mgr.vm09.fivqds.log: 90.8% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mgr.vm09.fivqds.log.gz
2026-03-05T23:50:55.002 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.audit.log
2026-03-05T23:50:55.003 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mon.vm09.log: 92.9% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-volume.log.gz
2026-03-05T23:50:55.003 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.log
2026-03-05T23:50:55.004 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-05T23:50:55.004 INFO:teuthology.orchestra.run.vm02.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-03-05T23:50:55.004 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.audit.log: 90.5% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.audit.log.gz
2026-03-05T23:50:55.004 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.cephadm.log
2026-03-05T23:50:55.005 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.log: 84.9% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.log.gz
2026-03-05T23:50:55.005 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.0.log
2026-03-05T23:50:55.006 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.cephadm.log: 93.0% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.cephadm.log.gz
2026-03-05T23:50:55.006 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.3.log
2026-03-05T23:50:55.009 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mon.vm02.log
2026-03-05T23:50:55.010 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.0.log: 94.1% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.0.log.gz
2026-03-05T23:50:55.011 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.4.log
2026-03-05T23:50:55.013 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.log
2026-03-05T23:50:55.014 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.3.log: 94.6% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.3.log.gz
2026-03-05T23:50:55.015 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.7.log
2026-03-05T23:50:55.018 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.4.log: 94.5% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.4.log.gz
2026-03-05T23:50:55.019 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mon.vm02.log: gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.audit.log
2026-03-05T23:50:55.020 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.log: 85.3% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.log.gz
2026-03-05T23:50:55.022 INFO:teuthology.orchestra.run.vm02.stderr: 92.3%gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mgr.vm02.trdlkm.log
2026-03-05T23:50:55.023 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.7.log: 94.5% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.7.log.gz
2026-03-05T23:50:55.023 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.audit.log: 90.4% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-05T23:50:55.024 INFO:teuthology.orchestra.run.vm02.stderr: -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.audit.log.gz
2026-03-05T23:50:55.024 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.cephadm.log
2026-03-05T23:50:55.029 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mgr.vm02.trdlkm.log: gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-volume.log
2026-03-05T23:50:55.031 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.cephadm.log: 92.2% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph.cephadm.log.gz
2026-03-05T23:50:55.034 INFO:teuthology.orchestra.run.vm09.stderr: 92.2% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mon.vm09.log.gz
2026-03-05T23:50:55.035 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-client.ceph-exporter.vm02.log
2026-03-05T23:50:55.036 INFO:teuthology.orchestra.run.vm09.stderr:
2026-03-05T23:50:55.036 INFO:teuthology.orchestra.run.vm09.stderr:real 0m0.054s
2026-03-05T23:50:55.036 INFO:teuthology.orchestra.run.vm09.stderr:user 0m0.063s
2026-03-05T23:50:55.036 INFO:teuthology.orchestra.run.vm09.stderr:sys 0m0.018s
2026-03-05T23:50:55.042 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.1.log
2026-03-05T23:50:55.045 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-client.ceph-exporter.vm02.log: 91.6% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-client.ceph-exporter.vm02.log.gz
2026-03-05T23:50:55.048 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.2.log
2026-03-05T23:50:55.051 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.1.log: 92.8% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-volume.log.gz
2026-03-05T23:50:55.054 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.5.log
2026-03-05T23:50:55.061 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.2.log: 94.5% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.1.log.gz
2026-03-05T23:50:55.067 INFO:teuthology.orchestra.run.vm02.stderr: 94.5% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.2.log.gz
2026-03-05T23:50:55.071 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.6.log
2026-03-05T23:50:55.082 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.5.log: 94.5%/var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.6.log: -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.5.log.gz
2026-03-05T23:50:55.087 INFO:teuthology.orchestra.run.vm02.stderr: 94.3% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-osd.6.log.gz
2026-03-05T23:50:55.117 INFO:teuthology.orchestra.run.vm02.stderr: 90.9% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mgr.vm02.trdlkm.log.gz
2026-03-05T23:50:55.216 INFO:teuthology.orchestra.run.vm02.stderr: 90.3% -- replaced with /var/log/ceph/e1ad3122-18e4-11f1-9926-f7644c158a97/ceph-mon.vm02.log.gz
2026-03-05T23:50:55.218 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-05T23:50:55.218 INFO:teuthology.orchestra.run.vm02.stderr:real 0m0.234s
2026-03-05T23:50:55.218 INFO:teuthology.orchestra.run.vm02.stderr:user 0m0.217s
2026-03-05T23:50:55.218 INFO:teuthology.orchestra.run.vm02.stderr:sys 0m0.024s
2026-03-05T23:50:55.218 INFO:tasks.cephadm:Archiving logs...
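[Editor's note] The spliced-looking gzip messages in the compression step above are not corruption: `xargs --max-procs=0` runs one `gzip -5 --verbose` per log file in parallel, and the concurrent processes write their progress to a shared stderr, so fragments from different files interleave. A rough Python equivalent of that step, only to illustrate where the interleaving comes from; the path under /var/log/ceph and the helper name compress are illustrative:

import gzip
import shutil
import sys
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path

def compress(path: Path) -> None:
    # Roughly `gzip -5 --verbose`: write <file>.gz, remove the original,
    # and report progress on stderr.
    target = Path(str(path) + '.gz')
    print(f'gzip -5 --verbose -- {path}', file=sys.stderr)
    with path.open('rb') as src, gzip.open(target, 'wb', compresslevel=5) as dst:
        shutil.copyfileobj(src, dst)
    path.unlink()
    print(f'{path}: replaced with {target}', file=sys.stderr)

if __name__ == '__main__':
    logs = list(Path('/var/log/ceph').rglob('*.log'))
    # Files are compressed concurrently, like `xargs --max-procs=0`; since
    # every worker writes to the same stderr, their messages can interleave,
    # which is what the run log above captured.
    with ProcessPoolExecutor() as pool:
        list(pool.map(compress, logs))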
2026-03-05T23:50:55.219 DEBUG:teuthology.misc:Transferring archived files from vm02:/var/log/ceph to /archive/irq0-2026-03-05_18:18:00-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/100/remote/vm02/log
2026-03-05T23:50:55.219 DEBUG:teuthology.orchestra.run.vm02:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-05T23:50:55.303 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/log/ceph to /archive/irq0-2026-03-05_18:18:00-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/100/remote/vm09/log
2026-03-05T23:50:55.303 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-05T23:50:55.337 INFO:tasks.cephadm:Removing cluster...
2026-03-05T23:50:55.337 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 --force
2026-03-05T23:50:55.626 INFO:teuthology.orchestra.run.vm02.stdout:Deleting cluster with fsid: e1ad3122-18e4-11f1-9926-f7644c158a97
2026-03-05T23:50:55.726 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e1ad3122-18e4-11f1-9926-f7644c158a97 --force
2026-03-05T23:50:56.006 INFO:teuthology.orchestra.run.vm09.stdout:Deleting cluster with fsid: e1ad3122-18e4-11f1-9926-f7644c158a97
2026-03-05T23:50:56.104 INFO:tasks.cephadm:Removing cephadm ...
2026-03-05T23:50:56.104 DEBUG:teuthology.orchestra.run.vm02:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-05T23:50:56.118 DEBUG:teuthology.orchestra.run.vm09:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-05T23:50:56.133 INFO:tasks.cephadm:Teardown complete
2026-03-05T23:50:56.134 ERROR:teuthology.run_tasks:Saw exception from tasks.
Traceback (most recent call last):
  File "/home/teuthos/teuthology/teuthology/run_tasks.py", line 112, in run_tasks
    manager.__enter__()
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 135, in __enter__
    return next(self.gen)
  File "/home/teuthos/src/github.com_kshtsk_ceph_e50baef5944c0b5e8e734db1c467f1f19415a932/qa/tasks/cephadm.py", line 2217, in task
    with contextutil.nested(
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 135, in __enter__
    return next(self.gen)
  File "/home/teuthos/teuthology/teuthology/contextutil.py", line 54, in nested
    raise exc[1]
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 153, in __exit__
    self.gen.throw(typ, value, traceback)
  File "/home/teuthos/src/github.com_kshtsk_ceph_e50baef5944c0b5e8e734db1c467f1f19415a932/qa/tasks/cephadm.py", line 1833, in initialize_config
    yield
  File "/home/teuthos/teuthology/teuthology/contextutil.py", line 30, in nested
    vars.append(enter())
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 135, in __enter__
    return next(self.gen)
  File "/home/teuthos/src/github.com_kshtsk_ceph_e50baef5944c0b5e8e734db1c467f1f19415a932/qa/tasks/cephadm.py", line 1128, in ceph_osds
    while proceed():
  File "/home/teuthos/teuthology/teuthology/contextutil.py", line 134, in __call__
    raise MaxWhileTries(error_msg)
teuthology.exceptions.MaxWhileTries: reached maximum tries (120) after waiting for 120 seconds
2026-03-05T23:50:56.134 DEBUG:teuthology.run_tasks:Unwinding manager cephadm
2026-03-05T23:50:56.135 DEBUG:teuthology.run_tasks:Unwinding manager nvme_loop
2026-03-05T23:50:56.137 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm02:/dev/vdb...
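[Editor's note] The MaxWhileTries traceback a few lines above is teuthology's bounded-polling pattern giving up: `ceph_osds` in cephadm.py sits in a `while proceed():` loop waiting for the OSDs to come up, and contextutil raises MaxWhileTries once the retry budget (120 tries here) is exhausted. A rough, self-contained sketch of that loop shape follows; it is illustrative only, not teuthology's implementation, and the names safe_while, wait_for_osds_up, and count_up_osds are hypothetical.

import time

class MaxWhileTries(Exception):
    """Raised when the polling loop exhausts its retry budget."""

def safe_while(sleep: float = 1.0, tries: int = 120):
    """Return a proceed() callable that sleeps between attempts and raises
    MaxWhileTries after `tries` attempts, matching the message seen above."""
    attempt = 0

    def proceed() -> bool:
        nonlocal attempt
        attempt += 1
        if attempt > tries:
            raise MaxWhileTries(
                f"reached maximum tries ({tries}) "
                f"after waiting for {int(tries * sleep)} seconds")
        if attempt > 1:
            time.sleep(sleep)
        return True

    return proceed

def wait_for_osds_up(expected: int, count_up_osds) -> None:
    # Poll count_up_osds() (e.g. a count parsed from `ceph osd stat`) until
    # the expected number of OSDs is up, or give up the way this job did.
    proceed = safe_while(sleep=1, tries=120)
    while proceed():
        if count_up_osds() >= expected:
            return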
2026-03-05T23:50:56.137 DEBUG:teuthology.orchestra.run.vm02:> sudo nvme disconnect -n vdb
2026-03-05T23:50:56.250 INFO:teuthology.orchestra.run.vm02.stdout:NQN:vdb disconnected 1 controller(s)
2026-03-05T23:50:56.252 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm02:/dev/vdc...
2026-03-05T23:50:56.252 DEBUG:teuthology.orchestra.run.vm02:> sudo nvme disconnect -n vdc
2026-03-05T23:50:56.345 INFO:teuthology.orchestra.run.vm02.stdout:NQN:vdc disconnected 1 controller(s)
2026-03-05T23:50:56.346 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm02:/dev/vdd...
2026-03-05T23:50:56.346 DEBUG:teuthology.orchestra.run.vm02:> sudo nvme disconnect -n vdd
2026-03-05T23:50:56.434 INFO:teuthology.orchestra.run.vm02.stdout:NQN:vdd disconnected 1 controller(s)
2026-03-05T23:50:56.435 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm02:/dev/vde...
2026-03-05T23:50:56.435 DEBUG:teuthology.orchestra.run.vm02:> sudo nvme disconnect -n vde
2026-03-05T23:50:56.523 INFO:teuthology.orchestra.run.vm02.stdout:NQN:vde disconnected 1 controller(s)
2026-03-05T23:50:56.525 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-05T23:50:56.525 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/scratch_devs
2026-03-05T23:50:56.549 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vdb...
2026-03-05T23:50:56.549 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n vdb
2026-03-05T23:50:56.643 INFO:teuthology.orchestra.run.vm09.stdout:NQN:vdb disconnected 1 controller(s)
2026-03-05T23:50:56.645 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vdc...
2026-03-05T23:50:56.645 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n vdc
2026-03-05T23:50:56.732 INFO:teuthology.orchestra.run.vm09.stdout:NQN:vdc disconnected 1 controller(s)
2026-03-05T23:50:56.734 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vdd...
2026-03-05T23:50:56.734 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n vdd
2026-03-05T23:50:56.825 INFO:teuthology.orchestra.run.vm09.stdout:NQN:vdd disconnected 1 controller(s)
2026-03-05T23:50:56.827 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vde...
2026-03-05T23:50:56.827 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n vde
2026-03-05T23:50:56.916 INFO:teuthology.orchestra.run.vm09.stdout:NQN:vde disconnected 1 controller(s)
2026-03-05T23:50:56.918 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-03-05T23:50:56.918 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/scratch_devs
2026-03-05T23:50:56.940 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-05T23:50:56.942 INFO:teuthology.task.clock:Checking final clock skew...
2026-03-05T23:50:56.942 DEBUG:teuthology.orchestra.run.vm02:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-05T23:50:56.944 DEBUG:teuthology.orchestra.run.vm09:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-05T23:50:56.959 INFO:teuthology.orchestra.run.vm02.stderr:bash: line 1: ntpq: command not found
2026-03-05T23:50:56.963 INFO:teuthology.orchestra.run.vm02.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-05T23:50:56.963 INFO:teuthology.orchestra.run.vm02.stdout:===============================================================================
2026-03-05T23:50:56.963 INFO:teuthology.orchestra.run.vm02.stdout:^* static.222.16.42.77.clie> 2 6 377 62 +2559ns[+4919ns] +/- 2496us
2026-03-05T23:50:56.963 INFO:teuthology.orchestra.run.vm02.stdout:^- dominus.von-oppen.com 2 6 377 64 +1148us[+1150us] +/- 48ms
2026-03-05T23:50:56.963 INFO:teuthology.orchestra.run.vm02.stdout:^- sv5.ggsrv.de 2 6 367 62 +8650us[+8650us] +/- 23ms
2026-03-05T23:50:56.963 INFO:teuthology.orchestra.run.vm02.stdout:^- 172-236-195-26.ip.linode> 3 6 377 63 +1502us[+1505us] +/- 21ms
2026-03-05T23:50:56.995 INFO:teuthology.orchestra.run.vm09.stderr:bash: line 1: ntpq: command not found
2026-03-05T23:50:56.998 INFO:teuthology.orchestra.run.vm09.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-05T23:50:56.998 INFO:teuthology.orchestra.run.vm09.stdout:===============================================================================
2026-03-05T23:50:56.998 INFO:teuthology.orchestra.run.vm09.stdout:^- dominus.von-oppen.com 2 6 377 62 +1137us[+1160us] +/- 48ms
2026-03-05T23:50:56.999 INFO:teuthology.orchestra.run.vm09.stdout:^- sv5.ggsrv.de 2 6 377 62 +8627us[+8650us] +/- 23ms
2026-03-05T23:50:56.999 INFO:teuthology.orchestra.run.vm09.stdout:^- 172-236-195-26.ip.linode> 3 6 377 64 +2209us[+2232us] +/- 22ms
2026-03-05T23:50:56.999 INFO:teuthology.orchestra.run.vm09.stdout:^* static.222.16.42.77.clie> 2 6 377 62 +7311ns[ +30us] +/- 2537us
2026-03-05T23:50:56.999 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-05T23:50:57.001 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-05T23:50:57.001 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-05T23:50:57.003 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-05T23:50:57.005 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-05T23:50:57.007 INFO:teuthology.task.internal:Duration was 724.040008 seconds
2026-03-05T23:50:57.007 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-05T23:50:57.009 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-05T23:50:57.009 DEBUG:teuthology.orchestra.run.vm02:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-05T23:50:57.011 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-05T23:50:57.044 INFO:teuthology.orchestra.run.vm02.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-05T23:50:57.077 INFO:teuthology.orchestra.run.vm09.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-05T23:50:57.466 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-03-05T23:50:57.466 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm02.local
2026-03-05T23:50:57.467 DEBUG:teuthology.orchestra.run.vm02:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-05T23:50:57.528 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm09.local
2026-03-05T23:50:57.529 DEBUG:teuthology.orchestra.run.vm09:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-05T23:50:57.552 INFO:teuthology.task.internal.syslog:Gathering journactl...
2026-03-05T23:50:57.553 DEBUG:teuthology.orchestra.run.vm02:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-05T23:50:57.570 DEBUG:teuthology.orchestra.run.vm09:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-05T23:50:58.425 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-05T23:50:58.425 DEBUG:teuthology.orchestra.run.vm02:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-05T23:50:58.426 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-05T23:50:58.448 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-05T23:50:58.448 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-05T23:50:58.448 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5/home/ubuntu/cephtest/archive/syslog/kern.log: --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-05T23:50:58.448 INFO:teuthology.orchestra.run.vm02.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-05T23:50:58.448 INFO:teuthology.orchestra.run.vm02.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-05T23:50:58.449 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-05T23:50:58.449 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-05T23:50:58.450 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-05T23:50:58.450 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-05T23:50:58.450 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: /home/ubuntu/cephtest/archive/syslog/journalctl.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-05T23:50:58.714 INFO:teuthology.orchestra.run.vm09.stderr: 98.5% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-05T23:50:58.745 INFO:teuthology.orchestra.run.vm02.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.3% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-05T23:50:58.747 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-05T23:50:58.749 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-05T23:50:58.750 DEBUG:teuthology.orchestra.run.vm02:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-05T23:50:58.812 DEBUG:teuthology.orchestra.run.vm09:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-05T23:50:58.838 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-05T23:50:58.840 DEBUG:teuthology.orchestra.run.vm02:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-05T23:50:58.854 DEBUG:teuthology.orchestra.run.vm09:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-05T23:50:58.878 INFO:teuthology.orchestra.run.vm02.stdout:kernel.core_pattern = core
2026-03-05T23:50:58.904 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = core
2026-03-05T23:50:58.917 DEBUG:teuthology.orchestra.run.vm02:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-05T23:50:58.947 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-05T23:50:58.947 DEBUG:teuthology.orchestra.run.vm09:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-05T23:50:58.971 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-05T23:50:58.971 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-05T23:50:58.973 INFO:teuthology.task.internal:Transferring archived files...
2026-03-05T23:50:58.973 DEBUG:teuthology.misc:Transferring archived files from vm02:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-05_18:18:00-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/100/remote/vm02
2026-03-05T23:50:58.973 DEBUG:teuthology.orchestra.run.vm02:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-05T23:50:59.018 DEBUG:teuthology.misc:Transferring archived files from vm09:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-05_18:18:00-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/100/remote/vm09
2026-03-05T23:50:59.018 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-05T23:50:59.047 INFO:teuthology.task.internal:Removing archive directory...
2026-03-05T23:50:59.047 DEBUG:teuthology.orchestra.run.vm02:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-05T23:50:59.058 DEBUG:teuthology.orchestra.run.vm09:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-05T23:50:59.103 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-05T23:50:59.105 INFO:teuthology.task.internal:Not uploading archives.
2026-03-05T23:50:59.105 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-05T23:50:59.107 INFO:teuthology.task.internal:Tidying up after the test...
2026-03-05T23:50:59.107 DEBUG:teuthology.orchestra.run.vm02:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-05T23:50:59.113 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-05T23:50:59.127 INFO:teuthology.orchestra.run.vm02.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 5 23:50 /home/ubuntu/cephtest
2026-03-05T23:50:59.159 INFO:teuthology.orchestra.run.vm09.stdout: 8532009 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 5 23:50 /home/ubuntu/cephtest
2026-03-05T23:50:59.160 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-05T23:50:59.166 INFO:teuthology.run:Summary data:
description: orch:cephadm:osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
duration: 724.0400083065033
failure_reason: '"2026-03-05T22:47:38.385867+0000 mon.vm02 (mon.0) 504 : cluster [WRN] Health check failed: Failed to apply 1 service(s): osd.all-available-devices (CEPHADM_APPLY_SPEC_FAIL)" in cluster log'
owner: irq0
sentry_event: null
status: fail
success: false
2026-03-05T23:50:59.166 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-05T23:50:59.183 INFO:teuthology.run:FAIL