2026-03-06T22:22:13.124 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-06T22:22:13.130 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-06T22:22:13.168 INFO:teuthology.run:Config:
archive_path: /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/386
branch: cobaltcore-storage-v19.2.3-fasttrack-5
description: orch:cephadm:smoke-roleless/{0-distro/centos_9.stream 1-start 2-services/nfs-ingress2 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '386'
last_in_suite: false
machine_type: vps
name: irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: cobaltcore-storage-v19.2.3-fasttrack-5
  ansible.cephlab:
    branch: main
    repo: https://github.com/kshtsk/ceph-cm-ansible.git
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: Europe/Berlin
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  cephadm:
    cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
    containers:
      image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
  install:
    ceph:
      flavor: default
      sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
    extra_system_packages:
      deb:
      - python3-xmltodict
      - s3cmd
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - s3cmd
    repos:
    - name: ceph-source
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/SRPMS
    - name: ceph-noarch
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/noarch
    - name: ceph
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/x86_64
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
    - scontext=system_u:system_r:getty_t:s0
  workunit:
    branch: tt-19.2.3-fasttrack-5-no-nvme-loop
    sha1: b952d7263a165ada4530724b87fab57a8f3f547b
owner: irq0
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 9421
sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
sleep_before_teardown: 0
suite: orch:cephadm:smoke-roleless
suite_branch: tt-19.2.3-fasttrack-5-no-nvme-loop
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_b952d7263a165ada4530724b87fab57a8f3f547b/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: b952d7263a165ada4530724b87fab57a8f3f547b
targets:
  vm01.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLFtVwbs67HsL+gWmxNVez+im0PyIERHGF8aa9nIBNNRUllnuKCWmrEY5KuJQcUguJSKIYZIKFnDodJvjNMvsqk=
  vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPuDK+jYo7T8wvRRj6UwjpdZITIZ1HnwmGMTEW5eV/6i8tYlgD6lqPUookDXOnEkqTTRCkY74jBALe+DBooqu80=
tasks:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install nvmetcli nvme-cli -y
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip: null
- cephadm.shell:
    host.a:
    - ceph orch device ls --refresh
- vip.exec:
    all-hosts:
    - systemctl stop nfs-server
- cephadm.shell:
    host.a:
    - ceph fs volume create foofs
    - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} --port 2999
    - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
- cephadm.wait_for_service:
    service: nfs.foo
- cephadm.wait_for_service:
    service: ingress.nfs.foo
- vip.exec:
    host.a:
    - mkdir /mnt/foo
    - sleep 5
    - mount -t nfs {{VIP0}}:/fake /mnt/foo -o port=2999
    - echo test > /mnt/foo/testfile
    - sync
- cephadm.shell:
    host.a:
    - |
      echo "Check with each haproxy down in turn..."
      for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $haproxy
        while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
        cat /mnt/foo/testfile
        echo $haproxy > /mnt/foo/testfile
        sync
        ceph orch daemon start $haproxy
        while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
      done
    volumes:
    - /mnt/foo:/mnt/foo
- vip.exec:
    all-hosts:
    - |
      echo "Check with $(hostname) ganesha(s) down..."
      for c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`; do
        cid=`echo $c | sed 's/@/-/'`
        id=`echo $c | cut -d @ -f 2 | sed 's/.service$//'`
        fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-`
        echo "Removing daemon $id fsid $fsid..."
        sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id

        echo "Waking up cephadm..."
        sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh

        while ! timeout 1 cat /mnt/foo/testfile ; do true ; done
        echo "Mount is back!"
      done
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-06_20:21:59
tube: vps
user: irq0
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.43333

2026-03-06T22:22:13.168 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_b952d7263a165ada4530724b87fab57a8f3f547b/qa; will attempt to use it
2026-03-06T22:22:13.168 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_b952d7263a165ada4530724b87fab57a8f3f547b/qa/tasks
2026-03-06T22:22:13.168 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-06T22:22:13.169 INFO:teuthology.task.internal:Saving configuration
2026-03-06T22:22:13.174 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-06T22:22:13.181 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-06T22:22:13.189 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm01.local', 'description': '/archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/386', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-06 21:20:41.108853', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:01', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLFtVwbs67HsL+gWmxNVez+im0PyIERHGF8aa9nIBNNRUllnuKCWmrEY5KuJQcUguJSKIYZIKFnDodJvjNMvsqk='}
2026-03-06T22:22:13.196 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm06.local', 'description': '/archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/386', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-06 21:20:41.109280', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:06', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPuDK+jYo7T8wvRRj6UwjpdZITIZ1HnwmGMTEW5eV/6i8tYlgD6lqPUookDXOnEkqTTRCkY74jBALe+DBooqu80='}
2026-03-06T22:22:13.205 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-06T22:22:13.212 INFO:teuthology.task.internal:roles: ubuntu@vm01.local - ['host.a', 'client.0']
2026-03-06T22:22:13.212 INFO:teuthology.task.internal:roles: ubuntu@vm06.local - ['host.b', 'client.1']
2026-03-06T22:22:13.212 INFO:teuthology.run_tasks:Running task console_log...
2026-03-06T22:22:13.229 DEBUG:teuthology.task.console_log:vm01 does not support IPMI; excluding
2026-03-06T22:22:13.235 DEBUG:teuthology.task.console_log:vm06 does not support IPMI; excluding
2026-03-06T22:22:13.235 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7ff8438aff40>, signals=[15])
2026-03-06T22:22:13.235 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-06T22:22:13.236 INFO:teuthology.task.internal:Opening connections...
2026-03-06T22:22:13.236 DEBUG:teuthology.task.internal:connecting to ubuntu@vm01.local
2026-03-06T22:22:13.236 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-06T22:22:13.297 DEBUG:teuthology.task.internal:connecting to ubuntu@vm06.local
2026-03-06T22:22:13.298 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-06T22:22:13.358 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-06T22:22:13.360 DEBUG:teuthology.orchestra.run.vm01:> uname -m
2026-03-06T22:22:13.415 INFO:teuthology.orchestra.run.vm01.stdout:x86_64
2026-03-06T22:22:13.415 DEBUG:teuthology.orchestra.run.vm01:> cat /etc/os-release
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:NAME="CentOS Stream"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:VERSION="9"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:ID="centos"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:ID_LIKE="rhel fedora"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:VERSION_ID="9"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:PLATFORM_ID="platform:el9"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:ANSI_COLOR="0;31"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:LOGO="fedora-logo-icon"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:HOME_URL="https://centos.org/"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-06T22:22:13.470 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-06T22:22:13.470 INFO:teuthology.lock.ops:Updating vm01.local on lock server
2026-03-06T22:22:13.475 DEBUG:teuthology.orchestra.run.vm06:> uname -m
2026-03-06T22:22:13.493 INFO:teuthology.orchestra.run.vm06.stdout:x86_64
2026-03-06T22:22:13.493 DEBUG:teuthology.orchestra.run.vm06:> cat /etc/os-release
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:NAME="CentOS Stream"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:VERSION="9"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:ID="centos"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:ID_LIKE="rhel fedora"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_ID="9"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:PLATFORM_ID="platform:el9"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:ANSI_COLOR="0;31"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:LOGO="fedora-logo-icon"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:HOME_URL="https://centos.org/"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-06T22:22:13.550 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-06T22:22:13.550 INFO:teuthology.lock.ops:Updating vm06.local on lock server
2026-03-06T22:22:13.554 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-06T22:22:13.556 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-06T22:22:13.557 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-06T22:22:13.557 DEBUG:teuthology.orchestra.run.vm01:> test '!' -e /home/ubuntu/cephtest
2026-03-06T22:22:13.558 DEBUG:teuthology.orchestra.run.vm06:> test '!' -e /home/ubuntu/cephtest
2026-03-06T22:22:13.606 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-06T22:22:13.607 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-06T22:22:13.607 DEBUG:teuthology.orchestra.run.vm01:> test -z $(ls -A /var/lib/ceph)
2026-03-06T22:22:13.613 DEBUG:teuthology.orchestra.run.vm06:> test -z $(ls -A /var/lib/ceph)
2026-03-06T22:22:13.627 INFO:teuthology.orchestra.run.vm01.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-06T22:22:13.664 INFO:teuthology.orchestra.run.vm06.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-06T22:22:13.664 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-06T22:22:13.672 DEBUG:teuthology.orchestra.run.vm01:> test -e /ceph-qa-ready
2026-03-06T22:22:13.687 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T22:22:13.878 DEBUG:teuthology.orchestra.run.vm06:> test -e /ceph-qa-ready
2026-03-06T22:22:13.895 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T22:22:14.089 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-06T22:22:14.090 INFO:teuthology.task.internal:Creating test directory...
2026-03-06T22:22:14.090 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-06T22:22:14.092 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-06T22:22:14.106 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-06T22:22:14.107 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-06T22:22:14.108 INFO:teuthology.task.internal:Creating archive directory...
2026-03-06T22:22:14.108 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-06T22:22:14.146 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-06T22:22:14.164 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-06T22:22:14.165 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-06T22:22:14.165 DEBUG:teuthology.orchestra.run.vm01:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-06T22:22:14.220 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T22:22:14.220 DEBUG:teuthology.orchestra.run.vm06:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-06T22:22:14.235 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T22:22:14.235 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-06T22:22:14.263 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-06T22:22:14.289 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-06T22:22:14.299 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-06T22:22:14.301 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-06T22:22:14.312 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-06T22:22:14.313 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-06T22:22:14.315 INFO:teuthology.task.internal:Configuring sudo...
2026-03-06T22:22:14.315 DEBUG:teuthology.orchestra.run.vm01:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-06T22:22:14.343 DEBUG:teuthology.orchestra.run.vm06:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-06T22:22:14.381 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-06T22:22:14.383 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
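The coredump task above points kernel.core_pattern at the per-job archive directory. A quick way to confirm the pattern stuck and that cores actually land there (a sketch using this run's paths; the SIGSEGV'd throwaway shell is only an illustration and assumes core size is not capped by the distro defaults):

    sysctl kernel.core_pattern                     # should print the archive path set above
    ulimit -c unlimited                            # lift any core size cap in this shell
    sh -c 'kill -SEGV $$' || true                  # crash a disposable shell to generate a core
    ls -l /home/ubuntu/cephtest/archive/coredump/  # expect a <epoch>.<pid>.core file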
2026-03-06T22:22:14.383 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-06T22:22:14.411 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-06T22:22:14.437 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-06T22:22:14.489 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-06T22:22:14.545 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-06T22:22:14.546 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-06T22:22:14.606 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-06T22:22:14.628 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-06T22:22:14.690 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-06T22:22:14.690 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-06T22:22:14.751 DEBUG:teuthology.orchestra.run.vm01:> sudo service rsyslog restart
2026-03-06T22:22:14.753 DEBUG:teuthology.orchestra.run.vm06:> sudo service rsyslog restart
2026-03-06T22:22:14.781 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-06T22:22:14.821 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-06T22:22:15.187 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-06T22:22:15.188 INFO:teuthology.task.internal:Starting timer...
2026-03-06T22:22:15.188 INFO:teuthology.run_tasks:Running task pcp...
2026-03-06T22:22:15.191 INFO:teuthology.run_tasks:Running task selinux...
2026-03-06T22:22:15.193 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']}
2026-03-06T22:22:15.193 INFO:teuthology.task.selinux:Excluding vm01: VMs are not yet supported
2026-03-06T22:22:15.193 INFO:teuthology.task.selinux:Excluding vm06: VMs are not yet supported
2026-03-06T22:22:15.193 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-06T22:22:15.193 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-06T22:22:15.193 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-06T22:22:15.193 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-06T22:22:15.194 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'Europe/Berlin'}}
2026-03-06T22:22:15.194 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git
2026-03-06T22:22:15.196 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin
2026-03-06T22:22:15.854 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main
2026-03-06T22:22:15.860 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-06T22:22:15.860 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "Europe/Berlin"}' -i /tmp/teuth_ansible_inventory6kztu5do --limit vm01.local,vm06.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-06T22:23:49.447 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm01.local'), Remote(name='ubuntu@vm06.local')]
2026-03-06T22:23:49.447 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm01.local'
2026-03-06T22:23:49.448 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-06T22:23:49.513 DEBUG:teuthology.orchestra.run.vm01:> true
2026-03-06T22:23:49.594 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm01.local'
2026-03-06T22:23:49.594 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm06.local'
2026-03-06T22:23:49.594 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-06T22:23:49.664 DEBUG:teuthology.orchestra.run.vm06:> true
2026-03-06T22:23:49.746 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm06.local'
2026-03-06T22:23:49.746 INFO:teuthology.run_tasks:Running task clock...
2026-03-06T22:23:49.750 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-06T22:23:49.750 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-06T22:23:49.750 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-06T22:23:49.752 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-06T22:23:49.752 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-06T22:23:49.795 INFO:teuthology.orchestra.run.vm01.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-06T22:23:49.813 INFO:teuthology.orchestra.run.vm01.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-06T22:23:49.844 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-06T22:23:49.848 INFO:teuthology.orchestra.run.vm01.stderr:sudo: ntpd: command not found
2026-03-06T22:23:49.863 INFO:teuthology.orchestra.run.vm01.stdout:506 Cannot talk to daemon
2026-03-06T22:23:49.865 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-06T22:23:49.881 INFO:teuthology.orchestra.run.vm01.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-06T22:23:49.898 INFO:teuthology.orchestra.run.vm01.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-06T22:23:49.903 INFO:teuthology.orchestra.run.vm06.stderr:sudo: ntpd: command not found
2026-03-06T22:23:49.919 INFO:teuthology.orchestra.run.vm06.stdout:506 Cannot talk to daemon
2026-03-06T22:23:49.937 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-06T22:23:49.948 INFO:teuthology.orchestra.run.vm01.stderr:bash: line 1: ntpq: command not found
2026-03-06T22:23:49.951 INFO:teuthology.orchestra.run.vm01.stdout:MS Name/IP address         Stratum Poll Reach LastRx Last sample
2026-03-06T22:23:49.951 INFO:teuthology.orchestra.run.vm01.stdout:===============================================================================
2026-03-06T22:23:49.959 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-06T22:23:50.015 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found
2026-03-06T22:23:50.020 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address         Stratum Poll Reach LastRx Last sample
2026-03-06T22:23:50.020 INFO:teuthology.orchestra.run.vm06.stdout:===============================================================================
2026-03-06T22:23:50.020 INFO:teuthology.run_tasks:Running task pexec...
2026-03-06T22:23:50.023 INFO:teuthology.task.pexec:Executing custom commands...
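The ntp errors above are expected: the clock task tries ntp and ntpd before falling back to chrony, and these CentOS 9 hosts ship only chronyd (the "506 Cannot talk to daemon" line is chronyc makestep running while chronyd was stopped by the preceding command in the chain). On a chrony-only host the chain reduces to roughly the following sketch, written here with chronyd left running so makestep can actually talk to it:

    sudo chronyc makestep    # step the clock immediately instead of slewing
    chronyc tracking         # check the current offset and stratum
    chronyc sources          # list time sources (same table header as above)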
2026-03-06T22:23:50.023 DEBUG:teuthology.orchestra.run.vm01:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-06T22:23:50.023 DEBUG:teuthology.orchestra.run.vm06:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-06T22:23:50.026 DEBUG:teuthology.task.pexec:ubuntu@vm01.local< sudo dnf remove nvme-cli -y
2026-03-06T22:23:50.026 DEBUG:teuthology.task.pexec:ubuntu@vm01.local< sudo dnf install nvmetcli nvme-cli -y
2026-03-06T22:23:50.026 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm01.local
2026-03-06T22:23:50.026 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-06T22:23:50.026 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-03-06T22:23:50.026 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo dnf remove nvme-cli -y
2026-03-06T22:23:50.026 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo dnf install nvmetcli nvme-cli -y
2026-03-06T22:23:50.026 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm06.local
2026-03-06T22:23:50.027 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-06T22:23:50.027 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-03-06T22:23:50.261 INFO:teuthology.orchestra.run.vm06.stdout:No match for argument: nvme-cli
2026-03-06T22:23:50.261 INFO:teuthology.orchestra.run.vm06.stderr:No packages marked for removal.
2026-03-06T22:23:50.266 INFO:teuthology.orchestra.run.vm06.stdout:Dependencies resolved.
2026-03-06T22:23:50.266 INFO:teuthology.orchestra.run.vm06.stdout:Nothing to do.
2026-03-06T22:23:50.266 INFO:teuthology.orchestra.run.vm06.stdout:Complete!
2026-03-06T22:23:50.271 INFO:teuthology.orchestra.run.vm01.stdout:No match for argument: nvme-cli
2026-03-06T22:23:50.271 INFO:teuthology.orchestra.run.vm01.stderr:No packages marked for removal.
2026-03-06T22:23:50.274 INFO:teuthology.orchestra.run.vm01.stdout:Dependencies resolved.
2026-03-06T22:23:50.275 INFO:teuthology.orchestra.run.vm01.stdout:Nothing to do.
2026-03-06T22:23:50.275 INFO:teuthology.orchestra.run.vm01.stdout:Complete!
2026-03-06T22:23:50.745 INFO:teuthology.orchestra.run.vm06.stdout:Last metadata expiration check: 0:01:02 ago on Fri 06 Mar 2026 10:22:48 PM CET.
2026-03-06T22:23:50.794 INFO:teuthology.orchestra.run.vm01.stdout:Last metadata expiration check: 0:01:00 ago on Fri 06 Mar 2026 10:22:50 PM CET.
2026-03-06T22:23:50.855 INFO:teuthology.orchestra.run.vm06.stdout:Dependencies resolved.
2026-03-06T22:23:50.855 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================
2026-03-06T22:23:50.855 INFO:teuthology.orchestra.run.vm06.stdout: Package                  Architecture  Version              Repository    Size
2026-03-06T22:23:50.855 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================
2026-03-06T22:23:50.855 INFO:teuthology.orchestra.run.vm06.stdout:Installing:
2026-03-06T22:23:50.855 INFO:teuthology.orchestra.run.vm06.stdout: nvme-cli                 x86_64        2.16-1.el9           baseos       1.2 M
2026-03-06T22:23:50.855 INFO:teuthology.orchestra.run.vm06.stdout: nvmetcli                 noarch        0.8-3.el9            baseos        44 k
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout:Installing dependencies:
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout: python3-configshell      noarch        1:1.1.30-1.el9       baseos        72 k
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout: python3-kmod             x86_64        0.9-32.el9           baseos        84 k
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout: python3-pyparsing        noarch        2.4.7-9.el9          baseos       150 k
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout: python3-urwid            x86_64        2.1.2-4.el9          baseos       837 k
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout:Transaction Summary
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout:Install 6 Packages
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout:Total download size: 2.3 M
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout:Installed size: 11 M
2026-03-06T22:23:50.856 INFO:teuthology.orchestra.run.vm06.stdout:Downloading Packages:
2026-03-06T22:23:50.915 INFO:teuthology.orchestra.run.vm01.stdout:Dependencies resolved.
2026-03-06T22:23:50.915 INFO:teuthology.orchestra.run.vm01.stdout:================================================================================
2026-03-06T22:23:50.915 INFO:teuthology.orchestra.run.vm01.stdout: Package                  Architecture  Version              Repository    Size
2026-03-06T22:23:50.915 INFO:teuthology.orchestra.run.vm01.stdout:================================================================================
2026-03-06T22:23:50.915 INFO:teuthology.orchestra.run.vm01.stdout:Installing:
2026-03-06T22:23:50.915 INFO:teuthology.orchestra.run.vm01.stdout: nvme-cli                 x86_64        2.16-1.el9           baseos       1.2 M
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout: nvmetcli                 noarch        0.8-3.el9            baseos        44 k
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout:Installing dependencies:
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout: python3-configshell      noarch        1:1.1.30-1.el9       baseos        72 k
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout: python3-kmod             x86_64        0.9-32.el9           baseos        84 k
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout: python3-pyparsing        noarch        2.4.7-9.el9          baseos       150 k
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout: python3-urwid            x86_64        2.1.2-4.el9          baseos       837 k
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout:Transaction Summary
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout:================================================================================
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout:Install 6 Packages
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout:Total download size: 2.3 M
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout:Installed size: 11 M
2026-03-06T22:23:50.916 INFO:teuthology.orchestra.run.vm01.stdout:Downloading Packages:
2026-03-06T22:23:51.271 INFO:teuthology.orchestra.run.vm06.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 141 kB/s | 44 kB 00:00
2026-03-06T22:23:51.333 INFO:teuthology.orchestra.run.vm06.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 192 kB/s | 72 kB 00:00
2026-03-06T22:23:51.469 INFO:teuthology.orchestra.run.vm06.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 427 kB/s | 84 kB 00:00
2026-03-06T22:23:51.485 INFO:teuthology.orchestra.run.vm06.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 989 kB/s | 150 kB 00:00
2026-03-06T22:23:51.608 INFO:teuthology.orchestra.run.vm06.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 1.8 MB/s | 1.2 MB 00:00
2026-03-06T22:23:51.688 INFO:teuthology.orchestra.run.vm01.stdout:(1/6): python3-configshell-1.1.30-1.el9.noarch. 302 kB/s | 72 kB 00:00
2026-03-06T22:23:51.689 INFO:teuthology.orchestra.run.vm01.stdout:(2/6): nvmetcli-0.8-3.el9.noarch.rpm 183 kB/s | 44 kB 00:00
2026-03-06T22:23:51.788 INFO:teuthology.orchestra.run.vm01.stdout:(3/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 1.5 MB/s | 150 kB 00:00
2026-03-06T22:23:51.805 INFO:teuthology.orchestra.run.vm06.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 2.4 MB/s | 837 kB 00:00
2026-03-06T22:23:51.806 INFO:teuthology.orchestra.run.vm06.stdout:--------------------------------------------------------------------------------
2026-03-06T22:23:51.806 INFO:teuthology.orchestra.run.vm06.stdout:Total 2.4 MB/s | 2.3 MB 00:00
2026-03-06T22:23:51.812 INFO:teuthology.orchestra.run.vm01.stdout:(4/6): python3-kmod-0.9-32.el9.x86_64.rpm 679 kB/s | 84 kB 00:00
2026-03-06T22:23:51.882 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction check
2026-03-06T22:23:51.891 INFO:teuthology.orchestra.run.vm06.stdout:Transaction check succeeded.
2026-03-06T22:23:51.891 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction test
2026-03-06T22:23:51.949 INFO:teuthology.orchestra.run.vm06.stdout:Transaction test succeeded.
2026-03-06T22:23:51.950 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction
2026-03-06T22:23:51.958 INFO:teuthology.orchestra.run.vm01.stdout:(5/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 4.8 MB/s | 837 kB 00:00
2026-03-06T22:23:52.156 INFO:teuthology.orchestra.run.vm06.stdout:  Preparing        : 1/1
2026-03-06T22:23:52.173 INFO:teuthology.orchestra.run.vm06.stdout:  Installing       : python3-urwid-2.1.2-4.el9.x86_64 1/6
2026-03-06T22:23:52.188 INFO:teuthology.orchestra.run.vm06.stdout:  Installing       : python3-pyparsing-2.4.7-9.el9.noarch 2/6
2026-03-06T22:23:52.196 INFO:teuthology.orchestra.run.vm06.stdout:  Installing       : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-06T22:23:52.207 INFO:teuthology.orchestra.run.vm06.stdout:  Installing       : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-06T22:23:52.210 INFO:teuthology.orchestra.run.vm06.stdout:  Installing       : nvmetcli-0.8-3.el9.noarch 5/6
2026-03-06T22:23:52.420 INFO:teuthology.orchestra.run.vm06.stdout:  Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6
2026-03-06T22:23:52.424 INFO:teuthology.orchestra.run.vm06.stdout:  Installing       : nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-06T22:23:52.824 INFO:teuthology.orchestra.run.vm01.stdout:(6/6): nvme-cli-2.16-1.el9.x86_64.rpm 858 kB/s | 1.2 MB 00:01
2026-03-06T22:23:52.824 INFO:teuthology.orchestra.run.vm01.stdout:--------------------------------------------------------------------------------
2026-03-06T22:23:52.824 INFO:teuthology.orchestra.run.vm01.stdout:Total 1.2 MB/s | 2.3 MB 00:01
2026-03-06T22:23:52.879 INFO:teuthology.orchestra.run.vm01.stdout:Running transaction check
2026-03-06T22:23:52.889 INFO:teuthology.orchestra.run.vm06.stdout:  Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-06T22:23:52.889 INFO:teuthology.orchestra.run.vm06.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-06T22:23:52.889 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-06T22:23:52.891 INFO:teuthology.orchestra.run.vm01.stdout:Transaction check succeeded.
2026-03-06T22:23:52.891 INFO:teuthology.orchestra.run.vm01.stdout:Running transaction test
2026-03-06T22:23:52.961 INFO:teuthology.orchestra.run.vm01.stdout:Transaction test succeeded.
2026-03-06T22:23:52.961 INFO:teuthology.orchestra.run.vm01.stdout:Running transaction
2026-03-06T22:23:53.156 INFO:teuthology.orchestra.run.vm01.stdout:  Preparing        : 1/1
2026-03-06T22:23:53.169 INFO:teuthology.orchestra.run.vm01.stdout:  Installing       : python3-urwid-2.1.2-4.el9.x86_64 1/6
2026-03-06T22:23:53.186 INFO:teuthology.orchestra.run.vm01.stdout:  Installing       : python3-pyparsing-2.4.7-9.el9.noarch 2/6
2026-03-06T22:23:53.193 INFO:teuthology.orchestra.run.vm01.stdout:  Installing       : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-06T22:23:53.204 INFO:teuthology.orchestra.run.vm01.stdout:  Installing       : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-06T22:23:53.206 INFO:teuthology.orchestra.run.vm01.stdout:  Installing       : nvmetcli-0.8-3.el9.noarch 5/6
2026-03-06T22:23:53.387 INFO:teuthology.orchestra.run.vm01.stdout:  Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6
2026-03-06T22:23:53.421 INFO:teuthology.orchestra.run.vm01.stdout:  Installing       : nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-06T22:23:53.475 INFO:teuthology.orchestra.run.vm06.stdout:  Verifying        : nvme-cli-2.16-1.el9.x86_64 1/6
2026-03-06T22:23:53.475 INFO:teuthology.orchestra.run.vm06.stdout:  Verifying        : nvmetcli-0.8-3.el9.noarch 2/6
2026-03-06T22:23:53.475 INFO:teuthology.orchestra.run.vm06.stdout:  Verifying        : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-06T22:23:53.475 INFO:teuthology.orchestra.run.vm06.stdout:  Verifying        : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-06T22:23:53.475 INFO:teuthology.orchestra.run.vm06.stdout:  Verifying        : python3-pyparsing-2.4.7-9.el9.noarch 5/6
2026-03-06T22:23:53.560 INFO:teuthology.orchestra.run.vm06.stdout:  Verifying        : python3-urwid-2.1.2-4.el9.x86_64 6/6
2026-03-06T22:23:53.560 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-06T22:23:53.560 INFO:teuthology.orchestra.run.vm06.stdout:Installed:
2026-03-06T22:23:53.560 INFO:teuthology.orchestra.run.vm06.stdout:  nvme-cli-2.16-1.el9.x86_64                nvmetcli-0.8-3.el9.noarch
2026-03-06T22:23:53.560 INFO:teuthology.orchestra.run.vm06.stdout:  python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-06T22:23:53.560 INFO:teuthology.orchestra.run.vm06.stdout:  python3-pyparsing-2.4.7-9.el9.noarch      python3-urwid-2.1.2-4.el9.x86_64
2026-03-06T22:23:53.560 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-06T22:23:53.560 INFO:teuthology.orchestra.run.vm06.stdout:Complete!
2026-03-06T22:23:53.613 DEBUG:teuthology.parallel:result is None
2026-03-06T22:23:53.841 INFO:teuthology.orchestra.run.vm01.stdout:  Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-06T22:23:53.841 INFO:teuthology.orchestra.run.vm01.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-06T22:23:53.841 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:23:54.436 INFO:teuthology.orchestra.run.vm01.stdout:  Verifying        : nvme-cli-2.16-1.el9.x86_64 1/6
2026-03-06T22:23:54.436 INFO:teuthology.orchestra.run.vm01.stdout:  Verifying        : nvmetcli-0.8-3.el9.noarch 2/6
2026-03-06T22:23:54.436 INFO:teuthology.orchestra.run.vm01.stdout:  Verifying        : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-06T22:23:54.436 INFO:teuthology.orchestra.run.vm01.stdout:  Verifying        : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-06T22:23:54.436 INFO:teuthology.orchestra.run.vm01.stdout:  Verifying        : python3-pyparsing-2.4.7-9.el9.noarch 5/6
2026-03-06T22:23:54.531 INFO:teuthology.orchestra.run.vm01.stdout:  Verifying        : python3-urwid-2.1.2-4.el9.x86_64 6/6
2026-03-06T22:23:54.531 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:23:54.531 INFO:teuthology.orchestra.run.vm01.stdout:Installed:
2026-03-06T22:23:54.531 INFO:teuthology.orchestra.run.vm01.stdout:  nvme-cli-2.16-1.el9.x86_64                nvmetcli-0.8-3.el9.noarch
2026-03-06T22:23:54.531 INFO:teuthology.orchestra.run.vm01.stdout:  python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-06T22:23:54.531 INFO:teuthology.orchestra.run.vm01.stdout:  python3-pyparsing-2.4.7-9.el9.noarch      python3-urwid-2.1.2-4.el9.x86_64
2026-03-06T22:23:54.531 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:23:54.531 INFO:teuthology.orchestra.run.vm01.stdout:Complete!
2026-03-06T22:23:54.601 DEBUG:teuthology.parallel:result is None
2026-03-06T22:23:54.601 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-06T22:23:54.649 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_DAEMON_PLACE_FAIL', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': '340d3c24fc6ae7529322dc7ccee6c6cb2589da0a', 'cephadm_binary_url': 'https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm', 'containers': {'image': 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5'}}
2026-03-06T22:23:54.649 INFO:tasks.cephadm:Provided image contains tag or digest, using it as is
2026-03-06T22:23:54.649 INFO:tasks.cephadm:Cluster image is harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
2026-03-06T22:23:54.649 INFO:tasks.cephadm:Cluster fsid is c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:23:54.649 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-03-06T22:23:54.649 INFO:tasks.cephadm:No mon roles; fabricating mons
2026-03-06T22:23:54.649 INFO:tasks.cephadm:Monitor IPs: {'mon.vm01': '192.168.123.101', 'mon.vm06': '192.168.123.106'}
2026-03-06T22:23:54.649 INFO:tasks.cephadm:Normalizing hostnames...
2026-03-06T22:23:54.649 DEBUG:teuthology.orchestra.run.vm01:> sudo hostname $(hostname -s)
2026-03-06T22:23:54.705 DEBUG:teuthology.orchestra.run.vm06:> sudo hostname $(hostname -s)
2026-03-06T22:23:54.749 INFO:tasks.cephadm:Downloading cephadm from url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
2026-03-06T22:23:54.749 DEBUG:teuthology.orchestra.run.vm01:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-06T22:23:55.818 INFO:teuthology.orchestra.run.vm01.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar  6 22:23 /home/ubuntu/cephtest/cephadm
2026-03-06T22:23:55.818 DEBUG:teuthology.orchestra.run.vm06:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-06T22:23:56.913 INFO:teuthology.orchestra.run.vm06.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar  6 22:23 /home/ubuntu/cephtest/cephadm
2026-03-06T22:23:56.914 DEBUG:teuthology.orchestra.run.vm01:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-06T22:23:56.940 DEBUG:teuthology.orchestra.run.vm06:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-06T22:23:56.967 INFO:tasks.cephadm:Pulling image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 on all hosts...
2026-03-06T22:23:56.967 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull
2026-03-06T22:23:56.982 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull
2026-03-06T22:23:57.398 INFO:teuthology.orchestra.run.vm01.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-06T22:23:57.425 INFO:teuthology.orchestra.run.vm06.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-06T22:24:23.954 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-06T22:24:23.955 INFO:teuthology.orchestra.run.vm01.stdout:    "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)",
2026-03-06T22:24:23.955 INFO:teuthology.orchestra.run.vm01.stdout:    "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1",
2026-03-06T22:24:23.955 INFO:teuthology.orchestra.run.vm01.stdout:    "repo_digests": [
2026-03-06T22:24:23.955 INFO:teuthology.orchestra.run.vm01.stdout:        "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0"
2026-03-06T22:24:23.955 INFO:teuthology.orchestra.run.vm01.stdout:    ]
2026-03-06T22:24:23.955 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-06T22:24:32.182 INFO:teuthology.orchestra.run.vm06.stdout:{
2026-03-06T22:24:32.182 INFO:teuthology.orchestra.run.vm06.stdout:    "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)",
2026-03-06T22:24:32.182 INFO:teuthology.orchestra.run.vm06.stdout:    "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1",
2026-03-06T22:24:32.182 INFO:teuthology.orchestra.run.vm06.stdout:    "repo_digests": [
2026-03-06T22:24:32.183 INFO:teuthology.orchestra.run.vm06.stdout:        "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0"
2026-03-06T22:24:32.183 INFO:teuthology.orchestra.run.vm06.stdout:    ]
2026-03-06T22:24:32.183 INFO:teuthology.orchestra.run.vm06.stdout:}
2026-03-06T22:24:32.210 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /etc/ceph
2026-03-06T22:24:32.250 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /etc/ceph
2026-03-06T22:24:32.292 DEBUG:teuthology.orchestra.run.vm01:> sudo chmod 777 /etc/ceph
2026-03-06T22:24:32.322 DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 777 /etc/ceph
2026-03-06T22:24:32.364 INFO:tasks.cephadm:Writing seed config...
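The pull output above reports an image_id and a repo_digests entry for the cluster image. Both can be cross-checked directly on a host with podman, which is the container engine cephadm drives on these nodes (a sketch; the commands only read local image metadata):

    sudo podman images --digests harbor.clyso.com/custom-ceph/ceph/ceph
    sudo podman inspect --format '{{.Id}}' \
        harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
    # both should agree with the image_id 8bccc98d... and sha256:ffa52c... digest above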
2026-03-06T22:24:32.365 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-06T22:24:32.365 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-06T22:24:32.365 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-06T22:24:32.365 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-06T22:24:32.365 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-06T22:24:32.365 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-06T22:24:32.365 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-06T22:24:32.365 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-06T22:24:32.365 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True
2026-03-06T22:24:32.365 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-06T22:24:32.365 DEBUG:teuthology.orchestra.run.vm01:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-06T22:24:32.394 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000

# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd

# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true

# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false

# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off

# tests delete pools
mon allow pool delete = true

fsid = c76e688a-19a2-11f1-bdea-01160fc6f239

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true

# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = True
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10

# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660      # 11m
auth service ticket ttl = 240  # 4m

# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
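Individual overrides from the seed config can be spot-checked once bootstrap finishes (a sketch; ceph config show reports a running daemon's effective options, so the second command only works after the mon below is up):

    grep 'osd mclock iops capacity threshold hdd' /home/ubuntu/cephtest/seed.ceph.conf
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph config show mon.vm01 debug_mon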
2026-03-06T22:24:32.394 DEBUG:teuthology.orchestra.run.vm01:mon.vm01> sudo journalctl -f -n 0 -u ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm01.service
2026-03-06T22:24:32.436 INFO:tasks.cephadm:Bootstrapping...
2026-03-06T22:24:32.436 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 -v bootstrap --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.101 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-06T22:24:32.775 INFO:teuthology.orchestra.run.vm01.stdout:--------------------------------------------------------------------------------
2026-03-06T22:24:32.775 INFO:teuthology.orchestra.run.vm01.stdout:cephadm ['--image', 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5', '-v', 'bootstrap', '--fsid', 'c76e688a-19a2-11f1-bdea-01160fc6f239', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.101', '--skip-admin-label']
2026-03-06T22:24:32.775 INFO:teuthology.orchestra.run.vm01.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts.
2026-03-06T22:24:32.775 INFO:teuthology.orchestra.run.vm01.stdout:Verifying podman|docker is present...
2026-03-06T22:24:32.804 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stdout 5.8.0
2026-03-06T22:24:32.804 INFO:teuthology.orchestra.run.vm01.stdout:Verifying lvm2 is present...
2026-03-06T22:24:32.805 INFO:teuthology.orchestra.run.vm01.stdout:Verifying time synchronization is in place...
2026-03-06T22:24:32.814 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-06T22:24:32.814 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-06T22:24:32.822 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-06T22:24:32.822 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive
2026-03-06T22:24:32.830 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout enabled
2026-03-06T22:24:32.841 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout active
2026-03-06T22:24:32.842 INFO:teuthology.orchestra.run.vm01.stdout:Unit chronyd.service is enabled and running
2026-03-06T22:24:32.842 INFO:teuthology.orchestra.run.vm01.stdout:Repeating the final host check...
2026-03-06T22:24:32.866 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stdout 5.8.0
2026-03-06T22:24:32.866 INFO:teuthology.orchestra.run.vm01.stdout:podman (/bin/podman) version 5.8.0 is present
2026-03-06T22:24:32.866 INFO:teuthology.orchestra.run.vm01.stdout:systemctl is present
2026-03-06T22:24:32.866 INFO:teuthology.orchestra.run.vm01.stdout:lvcreate is present
2026-03-06T22:24:32.874 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-06T22:24:32.874 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-06T22:24:32.882 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-06T22:24:32.882 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive
2026-03-06T22:24:32.890 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout enabled
2026-03-06T22:24:32.897 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout active
2026-03-06T22:24:32.898 INFO:teuthology.orchestra.run.vm01.stdout:Unit chronyd.service is enabled and running
2026-03-06T22:24:32.898 INFO:teuthology.orchestra.run.vm01.stdout:Host looks OK
2026-03-06T22:24:32.898 INFO:teuthology.orchestra.run.vm01.stdout:Cluster fsid: c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:24:32.898 INFO:teuthology.orchestra.run.vm01.stdout:Acquiring lock 139841472859584 on /run/cephadm/c76e688a-19a2-11f1-bdea-01160fc6f239.lock
2026-03-06T22:24:32.898 INFO:teuthology.orchestra.run.vm01.stdout:Lock 139841472859584 acquired on /run/cephadm/c76e688a-19a2-11f1-bdea-01160fc6f239.lock
2026-03-06T22:24:32.898 INFO:teuthology.orchestra.run.vm01.stdout:Verifying IP 192.168.123.101 port 3300 ...
2026-03-06T22:24:32.899 INFO:teuthology.orchestra.run.vm01.stdout:Verifying IP 192.168.123.101 port 6789 ...
2026-03-06T22:24:32.899 INFO:teuthology.orchestra.run.vm01.stdout:Base mon IP(s) is [192.168.123.101:3300, 192.168.123.101:6789], mon addrv is [v2:192.168.123.101:3300,v1:192.168.123.101:6789]
2026-03-06T22:24:32.903 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.101 metric 100
2026-03-06T22:24:32.903 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.101 metric 100
2026-03-06T22:24:32.907 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium
2026-03-06T22:24:32.907 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium
2026-03-06T22:24:32.910 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000
2026-03-06T22:24:32.910 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout     inet6 ::1/128 scope host
2026-03-06T22:24:32.910 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout        valid_lft forever preferred_lft forever
2026-03-06T22:24:32.910 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000
2026-03-06T22:24:32.910 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout     inet6 fe80::5055:ff:fe00:1/64 scope link noprefixroute
2026-03-06T22:24:32.910 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout        valid_lft forever preferred_lft forever
2026-03-06T22:24:32.911 INFO:teuthology.orchestra.run.vm01.stdout:Mon IP `192.168.123.101` is in CIDR network `192.168.123.0/24`
2026-03-06T22:24:32.911 INFO:teuthology.orchestra.run.vm01.stdout:Mon IP `192.168.123.101` is in CIDR network `192.168.123.0/24`
2026-03-06T22:24:32.911 INFO:teuthology.orchestra.run.vm01.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24']
2026-03-06T22:24:32.912 INFO:teuthology.orchestra.run.vm01.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-03-06T22:24:32.912 INFO:teuthology.orchestra.run.vm01.stdout:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-06T22:24:37.981 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stdout 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1
2026-03-06T22:24:37.981 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stderr Trying to pull harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-06T22:24:37.981 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stderr Getting image source signatures 2026-03-06T22:24:37.981 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stderr Copying blob sha256:89f108f95c9b33ae21c5514f17c1bd5ca646e21d3c5e8ac1e117cf65bcd40261 2026-03-06T22:24:37.981 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stderr Copying config sha256:8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 2026-03-06T22:24:37.981 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stderr Writing manifest to image destination 2026-03-06T22:24:38.250 INFO:teuthology.orchestra.run.vm01.stdout:ceph: stdout ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable) 2026-03-06T22:24:38.250 INFO:teuthology.orchestra.run.vm01.stdout:Ceph version: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable) 2026-03-06T22:24:38.250 INFO:teuthology.orchestra.run.vm01.stdout:Extracting ceph user uid/gid from container image... 2026-03-06T22:24:38.364 INFO:teuthology.orchestra.run.vm01.stdout:stat: stdout 167 167 2026-03-06T22:24:38.364 INFO:teuthology.orchestra.run.vm01.stdout:Creating initial keys... 2026-03-06T22:24:38.590 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-authtool: stdout AQAWRqtpI1M6IBAAResg1R9jg8qvIhtyiw61FQ== 2026-03-06T22:24:38.695 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-authtool: stdout AQAWRqtpRBKYJxAA8ib2G3zqObetq3Ga/ilkyQ== 2026-03-06T22:24:38.809 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-authtool: stdout AQAWRqtpVsR3LhAAuAOvE1Hy1MfbQhV6INcBlw== 2026-03-06T22:24:38.810 INFO:teuthology.orchestra.run.vm01.stdout:Creating initial monmap... 2026-03-06T22:24:38.912 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-06T22:24:38.912 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy 2026-03-06T22:24:38.912 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:24:38.912 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-06T22:24:38.912 INFO:teuthology.orchestra.run.vm01.stdout:monmaptool for vm01 [v2:192.168.123.101:3300,v1:192.168.123.101:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-06T22:24:38.912 INFO:teuthology.orchestra.run.vm01.stdout:setting min_mon_release = quincy 2026-03-06T22:24:38.912 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: set fsid to c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:24:38.912 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-06T22:24:38.912 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:24:38.912 INFO:teuthology.orchestra.run.vm01.stdout:Creating mon... 2026-03-06T22:24:39.076 INFO:teuthology.orchestra.run.vm01.stdout:create mon.vm01 on 2026-03-06T22:24:39.459 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 
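The "Creating initial keys..." and "Creating initial monmap..." steps above wrap two classic bootstrap tools: ceph-authtool mints the first secrets (the AQA... strings) and monmaptool writes an epoch-0 map containing the single seed monitor. A sketch of the equivalent calls, assuming the flag spellings from the usual manual-bootstrap procedure (in particular --addv for a v2/v1 address vector; older releases only offer --add with a plain address, so verify against your Ceph version):

    import subprocess

    def create_mon_keyring(path="/tmp/ceph.mon.keyring"):
        # Mint the initial mon. key, as in "Creating initial keys..."
        subprocess.run(
            ["ceph-authtool", "--create-keyring", path,
             "--gen-key", "-n", "mon.", "--cap", "mon", "allow *"],
            check=True)

    def create_monmap(fsid, name, addrv, path="/tmp/monmap"):
        # Seed an epoch-0 monmap with one monitor; monmaptool then logs
        # "writing epoch 0 to /tmp/monmap (1 monitors)" as seen above.
        subprocess.run(
            ["monmaptool", "--create", "--fsid", fsid,
             "--addv", name, addrv, path],
            check=True)

    create_mon_keyring()
    create_monmap("c76e688a-19a2-11f1-bdea-01160fc6f239", "vm01",
                  "[v2:192.168.123.101:3300,v1:192.168.123.101:6789]")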
2026-03-06T22:24:39.629 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-c76e688a-19a2-11f1-bdea-01160fc6f239.target → /etc/systemd/system/ceph-c76e688a-19a2-11f1-bdea-01160fc6f239.target. 2026-03-06T22:24:39.629 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-c76e688a-19a2-11f1-bdea-01160fc6f239.target → /etc/systemd/system/ceph-c76e688a-19a2-11f1-bdea-01160fc6f239.target. 2026-03-06T22:24:39.835 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm01 2026-03-06T22:24:39.835 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to reset failed state of unit ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm01.service: Unit ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm01.service not loaded. 2026-03-06T22:24:40.035 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-c76e688a-19a2-11f1-bdea-01160fc6f239.target.wants/ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm01.service → /etc/systemd/system/ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@.service. 2026-03-06T22:24:40.510 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present 2026-03-06T22:24:40.510 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to enable service . firewalld.service is not available 2026-03-06T22:24:40.510 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mon to start... 2026-03-06T22:24:40.512 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mon... 2026-03-06T22:24:40.974 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout cluster: 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout id: c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout health: HEALTH_OK 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout services: 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum vm01 (age 0.240789s) 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mgr: no daemons active 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout data: 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout pgs: 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:mon is available 2026-03-06T22:24:40.975 INFO:teuthology.orchestra.run.vm01.stdout:Assimilating anything we can from ceph.conf... 
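Two details in the block above are worth noting. The non-zero systemctl reset-failed is expected noise: cephadm clears any previous failed state before starting a fresh unit, and on first boot there is simply nothing to reset. The "Waiting for mon..." line is a bounded retry loop: keep issuing a status call against the new monitor until it answers. A minimal version of that loop (the -c/-k flags point at the bootstrap conf and keyring; the function name is illustrative):

    import subprocess
    import time

    def wait_for_mon(conf, keyring, attempts=15, delay=1.0):
        # Poll until the new monitor accepts a status request.
        for _ in range(attempts):
            r = subprocess.run(
                ["ceph", "-c", conf, "-k", keyring, "status"],
                capture_output=True)
            if r.returncode == 0:
                return True
            time.sleep(delay)
        return False

Here the mon answered on the first try (HEALTH_OK with one mon in quorum after roughly 0.24 s), so no retries appear in the log.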
2026-03-06T22:24:41.323 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [global] 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout fsid = c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.101:3300,v1:192.168.123.101:6789] 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [osd] 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-06T22:24:41.324 INFO:teuthology.orchestra.run.vm01.stdout:Generating new minimal ceph.conf... 2026-03-06T22:24:42.144 INFO:teuthology.orchestra.run.vm01.stdout:Restarting the monitor... 2026-03-06T22:24:42.719 INFO:teuthology.orchestra.run.vm01.stdout:Setting public_network to 192.168.123.0/24 in mon config section 2026-03-06T22:24:43.116 INFO:teuthology.orchestra.run.vm01.stdout:Wrote config to /etc/ceph/ceph.conf 2026-03-06T22:24:43.117 INFO:teuthology.orchestra.run.vm01.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-06T22:24:43.117 INFO:teuthology.orchestra.run.vm01.stdout:Creating mgr... 2026-03-06T22:24:43.117 INFO:teuthology.orchestra.run.vm01.stdout:Verifying port 0.0.0.0:9283 ... 2026-03-06T22:24:43.118 INFO:teuthology.orchestra.run.vm01.stdout:Verifying port 0.0.0.0:8765 ... 2026-03-06T22:24:43.118 INFO:teuthology.orchestra.run.vm01.stdout:Verifying port 0.0.0.0:8443 ... 2026-03-06T22:24:43.317 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mgr.vm01.mrlynj 2026-03-06T22:24:43.317 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to reset failed state of unit ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mgr.vm01.mrlynj.service: Unit ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mgr.vm01.mrlynj.service not loaded. 2026-03-06T22:24:43.480 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-c76e688a-19a2-11f1-bdea-01160fc6f239.target.wants/ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mgr.vm01.mrlynj.service → /etc/systemd/system/ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@.service. 
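Before the mgr unit is written, the three ports its HTTP endpoints will claim are probed ("Verifying port 0.0.0.0:9283 ..." and friends above). The cheapest honest probe is a bind test: if bind() succeeds, nothing else owns the port. A sketch:

    import socket

    def port_is_free(port, addr="0.0.0.0"):
        # bind() either succeeds (port unclaimed) or raises EADDRINUSE.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind((addr, port))
                return True
            except OSError:
                return False

    for port in (9283, 8765, 8443):
        assert port_is_free(port), f"port {port} already in use"

A bind test beats connect() probing here: a connect refusal only proves no listener is currently accepting, while a bind failure also catches sockets held in non-listening states.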
2026-03-06T22:24:43.672 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present 2026-03-06T22:24:43.672 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to enable service . firewalld.service is not available 2026-03-06T22:24:43.672 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present 2026-03-06T22:24:43.672 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available 2026-03-06T22:24:43.672 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr to start... 2026-03-06T22:24:43.672 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr... 2026-03-06T22:24:44.834 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:44.834 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-06T22:24:44.834 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "c76e688a-19a2-11f1-bdea-01160fc6f239", 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "vm01" 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 1, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-06T22:24:44.835 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-06T22:24:44.835 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-06T22:24:44.836 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-03-06T21:24:40:685056+0000", 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "restful" 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-03-06T21:24:40.691280+0000", 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: 
stdout } 2026-03-06T22:24:44.837 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (1/15)...
2026-03-06T22:24:47.267 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (2/15)...
2026-03-06T22:24:49.619 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (3/15)...
2026-03-06T22:24:52.144 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (4/15)...
2026-03-06T22:24:54.560 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (5/15)...
2026-03-06T22:24:55.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:24:55 vm01 ceph-mon[46942]: from='mgr.14100 192.168.123.101:0/541378129' entity='mgr.vm01.mrlynj' 2026-03-06T22:24:56.957 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "c76e688a-19a2-11f1-bdea-01160fc6f239", 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "vm01" 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 14, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 
2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-06T22:24:56.958 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-03-06T21:24:40:685056+0000", 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "restful" 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-03-06T21:24:40.691280+0000", 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-06T22:24:56.959 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-06T22:24:56.960 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-06T22:24:56.960 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-06T22:24:56.960 INFO:teuthology.orchestra.run.vm01.stdout:mgr is available 2026-03-06T22:24:57.351 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [global] 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout fsid = c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:24:57.352 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.101:3300,v1:192.168.123.101:6789] 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [osd] 2026-03-06T22:24:57.352 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-06T22:24:57.353 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-06T22:24:57.353 INFO:teuthology.orchestra.run.vm01.stdout:Enabling cephadm module... 2026-03-06T22:24:57.473 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:24:57 vm01 ceph-mon[46942]: mgrmap e3: vm01.mrlynj(active, since 1.0087s) 2026-03-06T22:24:57.473 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:24:57 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3707712034' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-06T22:24:58.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:24:58 vm01 ceph-mon[46942]: mgrmap e4: vm01.mrlynj(active, since 2s) 2026-03-06T22:24:58.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:24:58 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1667922194' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-06T22:24:58.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:24:58 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2256885684' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-06T22:24:58.750 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-06T22:24:58.750 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 5, 2026-03-06T22:24:58.750 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-06T22:24:58.750 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "active_name": "vm01.mrlynj", 2026-03-06T22:24:58.750 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-06T22:24:58.750 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-06T22:24:58.750 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for the mgr to restart... 2026-03-06T22:24:58.750 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr epoch 5... 2026-03-06T22:24:59.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:24:59 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/2256885684' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-06T22:24:59.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:24:59 vm01 ceph-mon[46942]: mgrmap e5: vm01.mrlynj(active, since 3s) 2026-03-06T22:24:59.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:24:59 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2983148528' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: Active manager daemon vm01.mrlynj restarted 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: Activating manager daemon vm01.mrlynj 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: osdmap e2: 0 total, 0 up, 0 in 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: mgrmap e6: vm01.mrlynj(active, starting, since 0.0129685s) 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm01"}]: dispatch 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr metadata", "who": "vm01.mrlynj", "id": "vm01.mrlynj"}]: dispatch 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: Manager daemon vm01.mrlynj is now available 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:09.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:09 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:10.177 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-06T22:25:10.177 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7, 2026-03-06T22:25:10.177 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-06T22:25:10.177 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-06T22:25:10.177 INFO:teuthology.orchestra.run.vm01.stdout:mgr epoch 5 is available 2026-03-06T22:25:10.177 INFO:teuthology.orchestra.run.vm01.stdout:Setting orchestrator backend to cephadm... 2026-03-06T22:25:10.346 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:10 vm01 ceph-mon[46942]: Found migration_current of "None". Setting to last migration. 
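Enabling a mgr module forces the active mgr to respawn, so the bootstrap cannot fire-and-forget: it records the mgrmap epoch around the enable and waits for the map to advance past it, which is what "Waiting for mgr epoch 5..." and the later "mgr epoch 5 is available" bracket above. A sketch of that barrier using ceph mgr stat, whose JSON output appears verbatim in the log (cephadm's exact epoch arithmetic may differ; this shows the shape):

    import json
    import subprocess
    import time

    def mgr_epoch():
        out = subprocess.run(["ceph", "mgr", "stat"],
                             capture_output=True, text=True,
                             check=True).stdout
        return json.loads(out)["epoch"]

    def enable_module_and_wait(module="cephadm", timeout=60.0):
        # The respawn is only observable as an mgrmap epoch bump, so
        # wait for the epoch to move past its pre-enable value.
        target = mgr_epoch()
        subprocess.run(["ceph", "mgr", "module", "enable", module],
                       check=True)
        deadline = time.time() + timeout
        while time.time() < deadline:
            if mgr_epoch() > target:
                return
            time.sleep(2)
        raise TimeoutError(f"mgr did not restart within {timeout}s")

Here the epoch moved from 5 to 7 (one bump for the module change, another as the respawned daemon went active), so the wait resolved in about 11 s.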
2026-03-06T22:25:10.346 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:10 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:25:10.346 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:10 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:25:10.346 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:10 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.mrlynj/mirror_snapshot_schedule"}]: dispatch
2026-03-06T22:25:10.346 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:10 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.mrlynj/trash_purge_schedule"}]: dispatch
2026-03-06T22:25:10.346 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:10 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:10.346 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:10 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:10.346 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:10 vm01 ceph-mon[46942]: mgrmap e7: vm01.mrlynj(active, since 1.02549s)
2026-03-06T22:25:11.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:11 vm01 ceph-mon[46942]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-06T22:25:11.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:11 vm01 ceph-mon[46942]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-06T22:25:11.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:11 vm01 ceph-mon[46942]: [06/Mar/2026:21:25:10] ENGINE Bus STARTING
2026-03-06T22:25:11.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:11 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:25:11.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:11 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:11.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:11 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:25:11.303 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout value unchanged
2026-03-06T22:25:11.303 INFO:teuthology.orchestra.run.vm01.stdout:Generating ssh key...
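The "Setting orchestrator backend to cephadm... / value unchanged" exchange above points the generic orchestrator CLI at the cephadm mgr module; the matching dispatches ("orch set backend", "cephadm set-user") appear in the mon journal just below. Run by hand, the same step is:

    ceph orch set backend cephadm   # route "ceph orch ..." calls to the cephadm module
    ceph cephadm set-user root      # have cephadm SSH to managed hosts as root
    ceph orch status                # should now report the cephadm backend as available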
2026-03-06T22:25:12.363 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:12 vm01 ceph-mon[46942]: [06/Mar/2026:21:25:10] ENGINE Serving on https://192.168.123.101:7150
2026-03-06T22:25:12.363 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:12 vm01 ceph-mon[46942]: [06/Mar/2026:21:25:10] ENGINE Client ('192.168.123.101', 37422) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-06T22:25:12.363 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:12 vm01 ceph-mon[46942]: [06/Mar/2026:21:25:10] ENGINE Serving on http://192.168.123.101:8765
2026-03-06T22:25:12.363 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:12 vm01 ceph-mon[46942]: [06/Mar/2026:21:25:10] ENGINE Bus STARTED
2026-03-06T22:25:12.363 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:12 vm01 ceph-mon[46942]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:12.363 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:12 vm01 ceph-mon[46942]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:12.363 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:12 vm01 ceph-mon[46942]: mgrmap e8: vm01.mrlynj(active, since 2s)
2026-03-06T22:25:12.363 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:12 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:12.363 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:12 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:12.420 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC7vJ3OC+Y1amyi2+M2lIPrNJoWPoy++V//z1seFI85X3rMw1AakVoQbll34F/rjYxohal5JPFYbgrcY3wouwBGsoLowJvIEVC600DE+CxqqS96hlggW6aIdUtIZQbsqBMtJiuXZq2CqH09Vts1c/w6IhM1bwSzwquJy5ztz7P+WTtUdI2kiOTaUC80d8Oc7Uo7QpfgpSdaTYVdKPevsgo7EGKIpUeJmSeR/o+pg53lCYHvOFg7FGW00zXjzMZ5ra0kQnCeI4zTjoanVcy+iOXku++YTyQjv6uetk8uiZmI3Qk3K25DRXNQbTnIjNcRF4vk+EfgW6M6CI6dSUmSEaLbXJ2GhQ/B+BpnYA8U3hqMaTXxcr6VdrXvOXdU85SAssZL2/ys68ylX84CTEd9+ctZUXYyQQNXT7YVamsrAas9B9/6N7s2vWHDWFLHSQeIuv5Z/yU9ORYrxa+hnWu8uCtrcT/8ISfrxhZxv6aOldR31J+r04xzfN2VcrPBWND/r00= ceph-c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:25:12.420 INFO:teuthology.orchestra.run.vm01.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub
2026-03-06T22:25:12.420 INFO:teuthology.orchestra.run.vm01.stdout:Adding key to root@localhost authorized_keys...
2026-03-06T22:25:12.421 INFO:teuthology.orchestra.run.vm01.stdout:Adding host vm01...
2026-03-06T22:25:13.399 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:13 vm01 ceph-mon[46942]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:13.399 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:13 vm01 ceph-mon[46942]: Generating ssh key...
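cephadm keeps a cluster-private SSH identity inside the mgr; the harness exports the public half to /home/ubuntu/cephtest/ceph.pub and authorizes it for root on every target before adding hosts. Done manually, the flow is roughly (standard cephadm CLI; the host name is just the second node of this run):

    ceph cephadm generate-key                  # create the cluster SSH key (kept in the mgr)
    ceph cephadm get-pub-key > /tmp/ceph.pub   # export the public key
    ssh-copy-id -f -i /tmp/ceph.pub root@vm06  # authorize it for root on the target
    ceph orch host add vm06 192.168.123.106    # the mgr can now manage the host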
2026-03-06T22:25:13.399 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:13 vm01 ceph-mon[46942]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:14.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:14 vm01 ceph-mon[46942]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm01", "addr": "192.168.123.101", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:15.487 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:15 vm01 ceph-mon[46942]: Deploying cephadm binary to vm01
2026-03-06T22:25:15.586 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Added host 'vm01' with addr '192.168.123.101'
2026-03-06T22:25:15.587 INFO:teuthology.orchestra.run.vm01.stdout:Deploying mon service with default placement...
2026-03-06T22:25:16.046 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled mon update...
2026-03-06T22:25:16.046 INFO:teuthology.orchestra.run.vm01.stdout:Deploying mgr service with default placement...
2026-03-06T22:25:16.461 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled mgr update...
2026-03-06T22:25:16.461 INFO:teuthology.orchestra.run.vm01.stdout:Deploying crash service with default placement...
2026-03-06T22:25:16.728 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:16 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:16.728 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:16 vm01 ceph-mon[46942]: Added host vm01
2026-03-06T22:25:16.728 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:16 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:25:16.728 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:16 vm01 ceph-mon[46942]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:16.728 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:16 vm01 ceph-mon[46942]: Saving service mon spec with placement count:5
2026-03-06T22:25:16.728 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:16 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:16.728 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:16 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:16.889 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled crash update...
2026-03-06T22:25:16.889 INFO:teuthology.orchestra.run.vm01.stdout:Deploying ceph-exporter service with default placement...
2026-03-06T22:25:17.358 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update...
2026-03-06T22:25:17.358 INFO:teuthology.orchestra.run.vm01.stdout:Deploying prometheus service with default placement...
2026-03-06T22:25:17.826 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled prometheus update...
2026-03-06T22:25:17.826 INFO:teuthology.orchestra.run.vm01.stdout:Deploying grafana service with default placement...
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: Saving service mgr spec with placement count:2
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: Saving service crash spec with placement *
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: Saving service ceph-exporter spec with placement *
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:18.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:17 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:18.305 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled grafana update...
2026-03-06T22:25:18.305 INFO:teuthology.orchestra.run.vm01.stdout:Deploying node-exporter service with default placement...
2026-03-06T22:25:18.718 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update...
2026-03-06T22:25:18.718 INFO:teuthology.orchestra.run.vm01.stdout:Deploying alertmanager service with default placement...
2026-03-06T22:25:19.154 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update...
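Each "Deploying <type> service with default placement..." above is a bare `ceph orch apply <type>`: the mgr persists a service spec (the "Saving service ... spec with placement ..." journal lines) and converges on it asynchronously, which is all "Scheduled <type> update..." means. The same spec can be applied explicitly; a sketch for the crash collector, whose saved placement is shown as `*`:

    # equivalent of "ceph orch apply crash": one crash daemon on every host
    printf 'service_type: crash\nplacement:\n  host_pattern: "*"\n' > crash.yaml
    ceph orch apply -i crash.yaml
    ceph orch ls crash          # PLACEMENT column should read *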
2026-03-06T22:25:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:19 vm01 ceph-mon[46942]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:19 vm01 ceph-mon[46942]: Saving service prometheus spec with placement count:1
2026-03-06T22:25:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:19 vm01 ceph-mon[46942]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:19 vm01 ceph-mon[46942]: Saving service grafana spec with placement count:1
2026-03-06T22:25:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:19 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:19 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:19 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:19 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:19 vm01 ceph-mon[46942]: from='mgr.14124 192.168.123.101:0/1368912896' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:19.987 INFO:teuthology.orchestra.run.vm01.stdout:Enabling the dashboard module...
2026-03-06T22:25:20.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:20 vm01 ceph-mon[46942]: from='client.14158 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:20.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:20 vm01 ceph-mon[46942]: Saving service node-exporter spec with placement *
2026-03-06T22:25:20.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:20 vm01 ceph-mon[46942]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:20.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:20 vm01 ceph-mon[46942]: Saving service alertmanager spec with placement count:1
2026-03-06T22:25:20.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:20 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/17060257' entity='client.admin'
2026-03-06T22:25:20.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:20 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/95266931' entity='client.admin'
2026-03-06T22:25:21.615 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:21 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/315913636' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch
2026-03-06T22:25:21.782 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {
2026-03-06T22:25:21.782 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 9,
2026-03-06T22:25:21.782 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-06T22:25:21.782 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "active_name": "vm01.mrlynj",
2026-03-06T22:25:21.782 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-03-06T22:25:21.783 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }
2026-03-06T22:25:21.783 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for the mgr to restart...
2026-03-06T22:25:21.783 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr epoch 9...
2026-03-06T22:25:22.255 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:22 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/315913636' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished
2026-03-06T22:25:22.255 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:22 vm01 ceph-mon[46942]: mgrmap e9: vm01.mrlynj(active, since 12s)
2026-03-06T22:25:22.255 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:22 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1790568827' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-06T22:25:31.928 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: Active manager daemon vm01.mrlynj restarted
2026-03-06T22:25:31.928 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: Activating manager daemon vm01.mrlynj
2026-03-06T22:25:31.928 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: osdmap e3: 0 total, 0 up, 0 in
2026-03-06T22:25:31.928 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: mgrmap e10: vm01.mrlynj(active, starting, since 0.00837101s)
2026-03-06T22:25:31.928 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm01"}]: dispatch
2026-03-06T22:25:31.928 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr metadata", "who": "vm01.mrlynj", "id": "vm01.mrlynj"}]: dispatch
2026-03-06T22:25:31.928 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-06T22:25:31.928 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-06T22:25:31.928 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-06T22:25:31.929 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: Manager daemon vm01.mrlynj is now available
2026-03-06T22:25:31.929 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:25:31.929 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:31 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.mrlynj/mirror_snapshot_schedule"}]: dispatch
2026-03-06T22:25:32.848 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {
2026-03-06T22:25:32.848 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11,
2026-03-06T22:25:32.848 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "initialized": true
2026-03-06T22:25:32.848 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }
2026-03-06T22:25:32.848 INFO:teuthology.orchestra.run.vm01.stdout:mgr epoch 9 is available
2026-03-06T22:25:32.848 INFO:teuthology.orchestra.run.vm01.stdout:Generating a dashboard self-signed certificate...
2026-03-06T22:25:33.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:32 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.mrlynj/trash_purge_schedule"}]: dispatch
2026-03-06T22:25:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:32 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:32 vm01 ceph-mon[46942]: mgrmap e11: vm01.mrlynj(active, since 1.01309s)
2026-03-06T22:25:33.374 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Self-signed certificate created
2026-03-06T22:25:33.374 INFO:teuthology.orchestra.run.vm01.stdout:Creating initial admin user...
2026-03-06T22:25:33.950 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$wjpA9qxBymIoQA4uFeDOSeE.ZaNwLQApEtfJsBBx4kDqC1M73oONC", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1772832333, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true}
2026-03-06T22:25:33.950 INFO:teuthology.orchestra.run.vm01.stdout:Fetching dashboard port number...
2026-03-06T22:25:34.318 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 8443
2026-03-06T22:25:34.319 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present
2026-03-06T22:25:34.319 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to open ports <[8443]>. firewalld.service is not available
2026-03-06T22:25:34.320 INFO:teuthology.orchestra.run.vm01.stdout:Ceph Dashboard is now available at:
2026-03-06T22:25:34.320 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:34.320 INFO:teuthology.orchestra.run.vm01.stdout:             URL: https://vm01.local:8443/
2026-03-06T22:25:34.320 INFO:teuthology.orchestra.run.vm01.stdout:            User: admin
2026-03-06T22:25:34.320 INFO:teuthology.orchestra.run.vm01.stdout:        Password: 6nz0jc7syt
2026-03-06T22:25:34.320 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:34.320 INFO:teuthology.orchestra.run.vm01.stdout:Saving cluster configuration to /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config directory
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: [06/Mar/2026:21:25:32] ENGINE Bus STARTING
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: from='client.14172 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: from='client.14172 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: [06/Mar/2026:21:25:32] ENGINE Serving on http://192.168.123.101:8765
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: [06/Mar/2026:21:25:32] ENGINE Serving on https://192.168.123.101:7150
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: [06/Mar/2026:21:25:32] ENGINE Bus STARTED
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: [06/Mar/2026:21:25:32] ENGINE Client ('192.168.123.101', 58870) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:34.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:34 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2124323918' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:        sudo /home/ubuntu/cephtest/cephadm shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:Or, if you are only running a single cluster on this host:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:        sudo /home/ubuntu/cephtest/cephadm shell
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:Please consider enabling telemetry to help improve Ceph:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:        ceph telemetry on
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:For more information see:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:        https://docs.ceph.com/en/latest/mgr/telemetry/
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:34.756 INFO:teuthology.orchestra.run.vm01.stdout:Bootstrap complete.
2026-03-06T22:25:34.793 INFO:tasks.cephadm:Fetching config...
2026-03-06T22:25:34.793 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-06T22:25:34.793 DEBUG:teuthology.orchestra.run.vm01:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-03-06T22:25:34.826 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-03-06T22:25:34.826 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-06T22:25:34.826 DEBUG:teuthology.orchestra.run.vm01:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-03-06T22:25:34.887 INFO:tasks.cephadm:Fetching mon keyring...
2026-03-06T22:25:34.888 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-06T22:25:34.888 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/keyring of=/dev/stdout
2026-03-06T22:25:34.956 INFO:tasks.cephadm:Fetching pub ssh key...
2026-03-06T22:25:34.956 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-06T22:25:34.956 DEBUG:teuthology.orchestra.run.vm01:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-03-06T22:25:35.018 INFO:tasks.cephadm:Installing pub ssh key for root users...
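With "Bootstrap complete." the task pulls the generated conf, keyrings, and public key back over its existing SSH channels; `dd if=... of=/dev/stdout` is used instead of plain `cat` so the identical command shape works for root-owned files under sudo. Outside the harness, the same fetch is roughly (paths taken from this run):

    fsid=c76e688a-19a2-11f1-bdea-01160fc6f239
    ssh vm01 'dd if=/etc/ceph/ceph.conf of=/dev/stdout' > ceph.conf
    ssh vm01 "sudo dd if=/var/lib/ceph/$fsid/mon.vm01/keyring of=/dev/stdout" > mon.keyring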
2026-03-06T22:25:35.018 DEBUG:teuthology.orchestra.run.vm01:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC7vJ3OC+Y1amyi2+M2lIPrNJoWPoy++V//z1seFI85X3rMw1AakVoQbll34F/rjYxohal5JPFYbgrcY3wouwBGsoLowJvIEVC600DE+CxqqS96hlggW6aIdUtIZQbsqBMtJiuXZq2CqH09Vts1c/w6IhM1bwSzwquJy5ztz7P+WTtUdI2kiOTaUC80d8Oc7Uo7QpfgpSdaTYVdKPevsgo7EGKIpUeJmSeR/o+pg53lCYHvOFg7FGW00zXjzMZ5ra0kQnCeI4zTjoanVcy+iOXku++YTyQjv6uetk8uiZmI3Qk3K25DRXNQbTnIjNcRF4vk+EfgW6M6CI6dSUmSEaLbXJ2GhQ/B+BpnYA8U3hqMaTXxcr6VdrXvOXdU85SAssZL2/ys68ylX84CTEd9+ctZUXYyQQNXT7YVamsrAas9B9/6N7s2vWHDWFLHSQeIuv5Z/yU9ORYrxa+hnWu8uCtrcT/8ISfrxhZxv6aOldR31J+r04xzfN2VcrPBWND/r00= ceph-c76e688a-19a2-11f1-bdea-01160fc6f239' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-06T22:25:35.095 INFO:teuthology.orchestra.run.vm01.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC7vJ3OC+Y1amyi2+M2lIPrNJoWPoy++V//z1seFI85X3rMw1AakVoQbll34F/rjYxohal5JPFYbgrcY3wouwBGsoLowJvIEVC600DE+CxqqS96hlggW6aIdUtIZQbsqBMtJiuXZq2CqH09Vts1c/w6IhM1bwSzwquJy5ztz7P+WTtUdI2kiOTaUC80d8Oc7Uo7QpfgpSdaTYVdKPevsgo7EGKIpUeJmSeR/o+pg53lCYHvOFg7FGW00zXjzMZ5ra0kQnCeI4zTjoanVcy+iOXku++YTyQjv6uetk8uiZmI3Qk3K25DRXNQbTnIjNcRF4vk+EfgW6M6CI6dSUmSEaLbXJ2GhQ/B+BpnYA8U3hqMaTXxcr6VdrXvOXdU85SAssZL2/ys68ylX84CTEd9+ctZUXYyQQNXT7YVamsrAas9B9/6N7s2vWHDWFLHSQeIuv5Z/yU9ORYrxa+hnWu8uCtrcT/8ISfrxhZxv6aOldR31J+r04xzfN2VcrPBWND/r00= ceph-c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:25:35.105 DEBUG:teuthology.orchestra.run.vm06:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC7vJ3OC+Y1amyi2+M2lIPrNJoWPoy++V//z1seFI85X3rMw1AakVoQbll34F/rjYxohal5JPFYbgrcY3wouwBGsoLowJvIEVC600DE+CxqqS96hlggW6aIdUtIZQbsqBMtJiuXZq2CqH09Vts1c/w6IhM1bwSzwquJy5ztz7P+WTtUdI2kiOTaUC80d8Oc7Uo7QpfgpSdaTYVdKPevsgo7EGKIpUeJmSeR/o+pg53lCYHvOFg7FGW00zXjzMZ5ra0kQnCeI4zTjoanVcy+iOXku++YTyQjv6uetk8uiZmI3Qk3K25DRXNQbTnIjNcRF4vk+EfgW6M6CI6dSUmSEaLbXJ2GhQ/B+BpnYA8U3hqMaTXxcr6VdrXvOXdU85SAssZL2/ys68ylX84CTEd9+ctZUXYyQQNXT7YVamsrAas9B9/6N7s2vWHDWFLHSQeIuv5Z/yU9ORYrxa+hnWu8uCtrcT/8ISfrxhZxv6aOldR31J+r04xzfN2VcrPBWND/r00= ceph-c76e688a-19a2-11f1-bdea-01160fc6f239' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-06T22:25:35.144 INFO:teuthology.orchestra.run.vm06.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC7vJ3OC+Y1amyi2+M2lIPrNJoWPoy++V//z1seFI85X3rMw1AakVoQbll34F/rjYxohal5JPFYbgrcY3wouwBGsoLowJvIEVC600DE+CxqqS96hlggW6aIdUtIZQbsqBMtJiuXZq2CqH09Vts1c/w6IhM1bwSzwquJy5ztz7P+WTtUdI2kiOTaUC80d8Oc7Uo7QpfgpSdaTYVdKPevsgo7EGKIpUeJmSeR/o+pg53lCYHvOFg7FGW00zXjzMZ5ra0kQnCeI4zTjoanVcy+iOXku++YTyQjv6uetk8uiZmI3Qk3K25DRXNQbTnIjNcRF4vk+EfgW6M6CI6dSUmSEaLbXJ2GhQ/B+BpnYA8U3hqMaTXxcr6VdrXvOXdU85SAssZL2/ys68ylX84CTEd9+ctZUXYyQQNXT7YVamsrAas9B9/6N7s2vWHDWFLHSQeIuv5Z/yU9ORYrxa+hnWu8uCtrcT/8ISfrxhZxv6aOldR31J+r04xzfN2VcrPBWND/r00= ceph-c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:25:35.158 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph config set mgr mgr/cephadm/allow_ptrace true
2026-03-06T22:25:35.296 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:35 vm01 ceph-mon[46942]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:35.296 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:35 vm01 ceph-mon[46942]: mgrmap e12: vm01.mrlynj(active, since 2s)
2026-03-06T22:25:35.296 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:35 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3633040914' entity='client.admin'
2026-03-06T22:25:35.553 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:25:35.970 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755
2026-03-06T22:25:35.970 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch client-keyring set client.admin '*' --mode 0755
2026-03-06T22:25:36.368 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:25:36.765 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm06
2026-03-06T22:25:36.765 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-06T22:25:36.765 DEBUG:teuthology.orchestra.run.vm06:> dd of=/etc/ceph/ceph.conf
2026-03-06T22:25:36.780 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-06T22:25:36.780 DEBUG:teuthology.orchestra.run.vm06:> dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-06T22:25:36.837 INFO:tasks.cephadm:Adding host vm06 to orchestrator...
2026-03-06T22:25:36.837 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch host add vm06
2026-03-06T22:25:37.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:36 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2732166026' entity='client.admin'
2026-03-06T22:25:37.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:36 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:37.260 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:25:38.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:37 vm01 ceph-mon[46942]: from='client.14190 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:38.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:37 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:38.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:37 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:38.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:37 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-06T22:25:38.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:37 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:25:38.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:37 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: from='client.14192 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm06", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: Updating vm01:/etc/ceph/ceph.conf
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: Updating vm01:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.conf
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: Updating vm01:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.client.admin.keyring
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: Deploying cephadm binary to vm06
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: Deploying daemon ceph-exporter.vm01 on vm01
2026-03-06T22:25:39.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:39 vm01 ceph-mon[46942]: mgrmap e13: vm01.mrlynj(active, since 6s)
2026-03-06T22:25:40.363 INFO:teuthology.orchestra.run.vm01.stdout:Added host 'vm06' with addr '192.168.123.106'
2026-03-06T22:25:40.501 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch host ls --format=json
2026-03-06T22:25:40.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:40 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:40.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:40 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:40.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:40 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:40.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:40 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:40.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:40 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-06T22:25:40.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:40 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-03-06T22:25:40.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:40 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:25:40.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:40 vm01 ceph-mon[46942]: Deploying daemon crash.vm01 on vm01
2026-03-06T22:25:40.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:40 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:40.953 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:25:41.373 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:25:41.373 INFO:teuthology.orchestra.run.vm01.stdout:[{"addr": "192.168.123.101", "hostname": "vm01", "labels": [], "status": ""}, {"addr": "192.168.123.106", "hostname": "vm06", "labels": [], "status": ""}]
2026-03-06T22:25:41.438 INFO:tasks.cephadm:Setting crush tunables to default
2026-03-06T22:25:41.439 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd crush tunables default
2026-03-06T22:25:41.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:41 vm01 ceph-mon[46942]: Added host vm06
2026-03-06T22:25:41.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:41 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:41.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:41 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:41.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:41 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:41.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:41 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:41.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:41 vm01 ceph-mon[46942]: Deploying daemon node-exporter.vm01 on vm01
2026-03-06T22:25:41.885 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:25:42.808 INFO:teuthology.orchestra.run.vm01.stderr:adjusted tunables profile to default
2026-03-06T22:25:42.931 INFO:tasks.cephadm:Adding mon.vm01 on vm01
2026-03-06T22:25:42.931 INFO:tasks.cephadm:Adding mon.vm06 on vm06
2026-03-06T22:25:42.931 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch apply mon '2;vm01:192.168.123.101=vm01;vm06:192.168.123.106=vm06'
2026-03-06T22:25:43.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:42 vm01 ceph-mon[46942]: from='client.14195 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-06T22:25:43.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:42 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:43.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:42 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2837524723' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch
2026-03-06T22:25:43.276 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:43.318 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:43.661 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled mon update...
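The mon placement argument '2;vm01:192.168.123.101=vm01;vm06:192.168.123.106=vm06' packs a daemon count plus explicit host:addr=name entries into one string, which is why the saved spec is echoed back as "vm01:192.168.123.101=vm01;vm06:192.168.123.106=vm06;count:2". Assuming the host:addr=name form is also accepted in a YAML hosts list, as it is on the CLI, the equivalent spec would be roughly:

    # same intent as the one-liner above, written as a service spec
    printf '%s\n' \
      'service_type: mon' \
      'placement:' \
      '  count: 2' \
      '  hosts:' \
      '    - vm01:192.168.123.101=vm01' \
      '    - vm06:192.168.123.106=vm06' > mon.yaml
    ceph orch apply -i mon.yaml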
2026-03-06T22:25:43.730 DEBUG:teuthology.orchestra.run.vm06:mon.vm06> sudo journalctl -f -n 0 -u ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm06.service
2026-03-06T22:25:43.731 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-06T22:25:43.732 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json
2026-03-06T22:25:44.117 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:43 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2837524723' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished
2026-03-06T22:25:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:43 vm01 ceph-mon[46942]: osdmap e4: 0 total, 0 up, 0 in
2026-03-06T22:25:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:43 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:43 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:43 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:43 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:43 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:44.163 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:44.544 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-06T22:25:44.544 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-06T22:25:44.544 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1
2026-03-06T22:25:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:44 vm01 ceph-mon[46942]: from='client.14199 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm01:192.168.123.101=vm01;vm06:192.168.123.106=vm06", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:25:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:44 vm01 ceph-mon[46942]: Saving service mon spec with placement vm01:192.168.123.101=vm01;vm06:192.168.123.106=vm06;count:2
2026-03-06T22:25:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:44 vm01 ceph-mon[46942]: Deploying daemon alertmanager.vm01 on vm01
2026-03-06T22:25:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:44 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/2919570203' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:25:45.628 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-06T22:25:45.628 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json
2026-03-06T22:25:45.979 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:46.033 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:46.416 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-06T22:25:46.417 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-06T22:25:46.417 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1
2026-03-06T22:25:46.719 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:46 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/4273425330' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:25:47.511 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-06T22:25:47.511 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json
2026-03-06T22:25:47.833 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:47.873 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: Regenerating cephadm self-signed grafana TLS certificates
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-06T22:25:48.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:47 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:48.244 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-06T22:25:48.244 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-06T22:25:48.244 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1
2026-03-06T22:25:49.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:48 vm01 ceph-mon[46942]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-06T22:25:49.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:48 vm01 ceph-mon[46942]: Deploying daemon grafana.vm01 on vm01
2026-03-06T22:25:49.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:48 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/1335398542' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:25:49.320 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-06T22:25:49.320 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json
2026-03-06T22:25:49.646 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:49.687 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:50.091 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-06T22:25:50.094 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-06T22:25:50.094 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1
2026-03-06T22:25:50.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:50 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/1319930769' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:25:51.620 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-06T22:25:51.620 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json
2026-03-06T22:25:51.981 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:52.030 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-06T22:25:52.428 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-06T22:25:52.428 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-06T22:25:52.428 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1
2026-03-06T22:25:53.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:52 vm01 ceph-mon[46942]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:25:53.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:52 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj'
2026-03-06T22:25:53.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:52 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/1614113668' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:25:53.482 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-06T22:25:53.482 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:25:53.869 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:25:53.917 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:25:54.385 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:25:54.385 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:25:54.386 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: Deploying daemon prometheus.vm01 on vm01 2026-03-06T22:25:55.338 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:55 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/139354028' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T22:25:55.488 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T22:25:55.488 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:25:55.821 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:25:55.862 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:25:56.209 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:25:56.210 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:25:56.210 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:25:57.303 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T22:25:57.303 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:25:57.613 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:25:57.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:57 vm01 ceph-mon[46942]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:25:57.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:57 vm01 ceph-mon[46942]: from='client.? 
192.168.123.106:0/1615773850' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T22:25:57.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:57 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:25:57.648 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:25:58.020 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:25:58.020 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:25:58.020 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:25:58.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:58 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/1826434694' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T22:25:59.081 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T22:25:59.082 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:25:59.432 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:25:59.474 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:00.004 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:26:00.004 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:26:00.004 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:26:00.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:25:59 vm01 ceph-mon[46942]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:00.849 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:00 vm01 ceph-mon[46942]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:00.849 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:00 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/2041589561' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T22:26:01.086 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T22:26:01.086 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:26:01.421 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:01.461 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:01.843 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:26:01.843 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:26:01.843 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:26:02.252 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:01 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:02.252 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:01 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:02.252 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:01 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:02.252 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:01 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-06T22:26:02.252 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:01 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:02.252 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:01 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/2018937148' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T22:26:02.893 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T22:26:02.893 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:26:03.221 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:03.263 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:03.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:02 vm01 ceph-mon[46942]: from='mgr.14168 192.168.123.101:0/3874934246' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-06T22:26:03.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:02 vm01 ceph-mon[46942]: mgrmap e14: vm01.mrlynj(active, since 30s) 2026-03-06T22:26:03.645 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:26:03.645 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:26:03.645 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:26:04.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:03 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/2447447370' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T22:26:04.736 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T22:26:04.736 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:26:05.090 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:05.141 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:05.534 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:26:05.534 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:26:05.534 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:26:05.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:05 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/3195802147' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T22:26:06.597 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T22:26:06.597 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:26:06.968 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:07.011 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:07.428 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:26:07.428 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:26:07.428 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:26:07.791 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:07 vm01 ceph-mon[46942]: from='client.? 
192.168.123.106:0/4072808312' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T22:26:08.489 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T22:26:08.490 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:26:08.837 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:08.879 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:09.274 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:26:09.275 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:26:09.275 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:26:09.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:09 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/3331887938' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T22:26:10.348 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T22:26:10.348 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:26:10.723 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:10.767 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:11.297 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:26:11.297 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:26:11.297 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:26:11.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:11 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/1790765301' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T22:26:12.364 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T22:26:12.364 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mon dump -f json 2026-03-06T22:26:12.718 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:12.763 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T22:26:13.212 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:26:13.212 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":1,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:24:38.888443Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T22:26:13.212 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 1 2026-03-06T22:26:13.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: from='client.? 
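tasks.cephadm stays in this state until the monmap lists a second mon. A minimal sketch of that kind of wait loop, reusing the exact cephadm invocation from the DEBUG records above (the real tasks.cephadm code differs in structure and error handling):

    # Poll "ceph mon dump -f json" via cephadm shell until the monmap
    # holds the wanted number of mons; paths, image, and fsid are the
    # ones from the log above.
    import json
    import subprocess
    import time

    CMD = [
        "sudo", "/home/ubuntu/cephtest/cephadm",
        "--image", "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5",
        "shell", "-c", "/etc/ceph/ceph.conf",
        "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--fsid", "c76e688a-19a2-11f1-bdea-01160fc6f239",
        "--", "ceph", "mon", "dump", "-f", "json",
    ]

    def wait_for_mons(want: int = 2, timeout: float = 600.0) -> dict:
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            out = subprocess.run(CMD, capture_output=True, text=True,
                                 check=True).stdout
            monmap = json.loads(out)
            if len(monmap["mons"]) >= want:
                return monmap
            time.sleep(2)  # the log shows roughly a 2 s poll interval
        raise TimeoutError("monmap never reached %d mons" % want)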
2026-03-06T22:26:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: Active manager daemon vm01.mrlynj restarted
2026-03-06T22:26:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: Activating manager daemon vm01.mrlynj
2026-03-06T22:26:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: osdmap e5: 0 total, 0 up, 0 in
2026-03-06T22:26:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: mgrmap e15: vm01.mrlynj(active, starting, since 0.0102076s)
2026-03-06T22:26:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm01"}]: dispatch
2026-03-06T22:26:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr metadata", "who": "vm01.mrlynj", "id": "vm01.mrlynj"}]: dispatch
2026-03-06T22:26:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-06T22:26:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-06T22:26:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-06T22:26:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:13 vm01 ceph-mon[46942]: Manager daemon vm01.mrlynj is now available
2026-03-06T22:26:14.297 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
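The burst above is a mgr failover cycle: the active mgr restarts, re-activates, re-collects daemon metadata, and is declared available again. A small sketch of watching for that from the CLI; it assumes, as holds for this release, that `ceph mgr dump -f json` carries `available` and `active_name` fields:

    # Wait until an active mgr reports available again after a restart.
    # Assumes a reachable admin CLI on the current host.
    import json
    import subprocess
    import time

    def wait_for_active_mgr(timeout: float = 120.0) -> str:
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            out = subprocess.run(["ceph", "mgr", "dump", "-f", "json"],
                                 capture_output=True, text=True,
                                 check=True).stdout
            mgrmap = json.loads(out)
            if mgrmap.get("available"):
                return mgrmap["active_name"]  # e.g. "vm01.mrlynj" above
            time.sleep(1)
        raise TimeoutError("no active mgr within timeout")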
2026-03-06T22:26:14.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:26:14.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T22:26:14.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.mrlynj/mirror_snapshot_schedule"}]: dispatch
2026-03-06T22:26:14.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.mrlynj/trash_purge_schedule"}]: dispatch
2026-03-06T22:26:14.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:14 vm01 ceph-mon[46942]: mgrmap e16: vm01.mrlynj(active, since 1.02922s)
2026-03-06T22:26:15.602 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:15 vm01 ceph-mon[46942]: [06/Mar/2026:21:26:14] ENGINE Bus STARTING
2026-03-06T22:26:15.602 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:15 vm01 ceph-mon[46942]: [06/Mar/2026:21:26:14] ENGINE Serving on http://192.168.123.101:8765
2026-03-06T22:26:16.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:16 vm01 ceph-mon[46942]: [06/Mar/2026:21:26:15] ENGINE Serving on https://192.168.123.101:7150
2026-03-06T22:26:16.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:16 vm01 ceph-mon[46942]: [06/Mar/2026:21:26:15] ENGINE Bus STARTED
2026-03-06T22:26:16.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:16 vm01 ceph-mon[46942]: [06/Mar/2026:21:26:15] ENGINE Client ('192.168.123.101', 60066) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-06T22:26:16.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:16 vm01 ceph-mon[46942]: mgrmap e17: vm01.mrlynj(active, since 2s)
2026-03-06T22:26:18.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:17 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
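The ENGINE lines are the mgr's embedded CherryPy server bringing up its HTTP (8765) and HTTPS (7150) listeners. The "lost ... during handshake" entry is the benign trace a TLS listener emits when a client opens a TCP connection and drops it before completing the handshake, which is what bare port probes and TCP health checks do. A minimal reproduction sketch, using the host and port from the log:

    # Open a TCP connection to the TLS listener and close it without
    # speaking TLS, which triggers the handshake-EOF trace seen above.
    import socket

    with socket.create_connection(("192.168.123.101", 7150), timeout=5):
        pass  # immediate close; the server logs EOF during the handshake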
2026-03-06T22:26:18.532 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-06T22:26:18.935 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.conf
2026-03-06T22:26:19.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-06T22:26:19.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:19.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T22:26:19.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: Updating vm01:/etc/ceph/ceph.conf
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: Updating vm06:/etc/ceph/ceph.conf
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: Updating vm06:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.conf
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: Updating vm01:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.conf
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: Updating vm06:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.client.admin.keyring
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: Updating vm01:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.client.admin.keyring
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:19.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:19 vm01 ceph-mon[46942]: Deploying daemon ceph-exporter.vm06 on vm06
2026-03-06T22:26:20.589 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
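The `config generate-minimal-conf` and `auth get client.admin` dispatches feed the `Updating <host>:<path>` pushes above: cephadm keeps a minimal ceph.conf plus the admin keyring in sync at both /etc/ceph and /var/lib/ceph/<fsid>/config on managed hosts. A sketch of producing the same two files by hand, assuming a working admin CLI (writing to /etc/ceph needs root):

    # Regenerate the two files the "Updating ..." records above
    # distribute; both subcommands exist in stock Ceph.
    import subprocess

    def ceph(*args: str) -> str:
        return subprocess.run(["ceph", *args], capture_output=True,
                              text=True, check=True).stdout

    minimal_conf = ceph("config", "generate-minimal-conf")
    admin_keyring = ceph("auth", "get", "client.admin")

    with open("/etc/ceph/ceph.conf", "w") as f:
        f.write(minimal_conf)
    with open("/etc/ceph/ceph.client.admin.keyring", "w") as f:
        f.write(admin_keyring)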
2026-03-06T22:26:21.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-06T22:26:21.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-03-06T22:26:21.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:21.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:21 vm01 ceph-mon[46942]: Deploying daemon crash.vm06 on vm06
2026-03-06T22:26:22.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:22 vm01 ceph-mon[46942]: Deploying daemon node-exporter.vm06 on vm06
2026-03-06T22:26:25.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:25 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm06.awlziz", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-06T22:26:25.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:25 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm06.awlziz", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-03-06T22:26:25.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:25 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-06T22:26:25.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:25 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:25.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:25 vm01 ceph-mon[46942]: Deploying daemon mgr.vm06.awlziz on vm06
2026-03-06T22:26:26.864 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
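Each `Deploying daemon ...` above is preceded by an `auth get-or-create` that mints the daemon's keyring; the flattened `caps` arrays in the audit records are (entity-type, capability) pairs. The same call made by hand looks like this sketch, using the client.crash.vm06 entity and caps from the log:

    # Mint a crash-agent keyring the way the audit log shows cephadm
    # doing it; the caps list alternates entity type and cap spec,
    # mirroring the flattened "caps" array in the log record.
    import subprocess

    entity = "client.crash.vm06"
    caps = ["mon", "profile crash", "mgr", "profile crash"]

    keyring = subprocess.run(
        ["ceph", "auth", "get-or-create", entity, *caps],
        capture_output=True, text=True, check=True,
    ).stdout
    print(keyring)  # "[client.crash.vm06] key = ..." on success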
2026-03-06T22:26:26.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:26 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-06T22:26:26.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:26 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:26.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:26 vm01 ceph-mon[46942]: Deploying daemon mon.vm06 on vm06
2026-03-06T22:26:26.903 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:26 vm06 systemd[1]: Starting Ceph mon.vm06 for c76e688a-19a2-11f1-bdea-01160fc6f239...
2026-03-06T22:26:27.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:26 vm06 podman[52560]: 2026-03-06 22:26:26.998795881 +0100 CET m=+0.082439408 container create d717d212d7fb4c392fede99894a4e2a8cfe5647fff71bd5fb51eaeb6f74b09a4 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-c76e688a-19a2-11f1-bdea-01160fc6f239-mon-vm06, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a)
2026-03-06T22:26:27.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 podman[52560]: 2026-03-06 22:26:26.939391263 +0100 CET m=+0.023034799 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-06T22:26:27.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 podman[52560]: 2026-03-06 22:26:27.126851085 +0100 CET m=+0.210494612 container init d717d212d7fb4c392fede99894a4e2a8cfe5647fff71bd5fb51eaeb6f74b09a4 (image and labels as on the container create entry above)
2026-03-06T22:26:27.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 podman[52560]: 2026-03-06 22:26:27.138381159 +0100 CET m=+0.222024686 container start d717d212d7fb4c392fede99894a4e2a8cfe5647fff71bd5fb51eaeb6f74b09a4 (image and labels as on the container create entry above)
2026-03-06T22:26:27.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 bash[52560]: d717d212d7fb4c392fede99894a4e2a8cfe5647fff71bd5fb51eaeb6f74b09a4
2026-03-06T22:26:27.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 systemd[1]: Started Ceph mon.vm06 for c76e688a-19a2-11f1-bdea-01160fc6f239.
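mon.vm06, the daemon the monmap poll has been waiting for, now runs as a podman container under a systemd unit named after the fsid and daemon id (ceph-<fsid>@mon.vm06). A sketch for enumerating what cephadm has deployed on a host; it assumes `cephadm ls` emits a JSON array whose entries carry `name` and `systemd_unit` keys, and reuses the test binary path from the log:

    # List cephadm-managed daemons on this host with their systemd units;
    # the "cephadm ls" output shape is an assumption noted above.
    import json
    import subprocess

    out = subprocess.run(
        ["sudo", "/home/ubuntu/cephtest/cephadm", "ls"],
        capture_output=True, text=True, check=True,
    ).stdout
    for d in json.loads(out):
        print(d["name"], "->", d.get("systemd_unit", "?"))
    # After the deployment above this would be expected to include e.g.:
    #   mon.vm06 -> ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm06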
2026-03-06T22:26:27.320 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm06/config
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: set uid:gid to 167:167 (ceph:ceph)
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable), process ceph-mon, pid 2
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: pidfile_write: ignore empty --pid-file
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: load: jerasure load: lrc
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: RocksDB version: 7.9.2
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Git sha 0
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Compile date 2026-03-06 13:52:12
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: DB SUMMARY
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: DB Session ID: Z5AWG95NYFO0OHYF6B7K
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: CURRENT file: CURRENT
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: IDENTITY file: IDENTITY
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: MANIFEST file: MANIFEST-000005 size: 59 Bytes
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm06/store.db dir, Total Num: 0, files:
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm06/store.db: 000004.log size: 511 ;
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.error_if_exists: 0
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.create_if_missing: 0
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.paranoid_checks: 1
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.flush_verify_memtable_count: 1
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.env: 0x55e7f140eca0
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.fs: PosixFileSystem
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.info_log: 0x55e7f3755820
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_file_opening_threads: 16
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.statistics: (nil)
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.use_fsync: 0
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_log_file_size: 0
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_manifest_file_size: 1073741824
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.log_file_time_to_roll: 0
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.keep_log_file_num: 1000
2026-03-06T22:26:27.466 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.recycle_log_file_num: 0
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.allow_fallocate: 1
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.allow_mmap_reads: 0
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.allow_mmap_writes: 0
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.use_direct_reads: 0
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.create_missing_column_families: 0
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.db_log_dir:
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.wal_dir:
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.table_cache_numshardbits: 6
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.WAL_ttl_seconds: 0
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.WAL_size_limit_MB: 0
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.manifest_preallocation_size: 4194304
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.is_fd_close_on_exec: 1
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.advise_random_on_open: 1
2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.db_write_buffer_size: 0
2026-03-06T22:26:27.467
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.write_buffer_manager: 0x55e7f3759900 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.rate_limiter: (nil) 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.wal_recovery_mode: 2 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.enable_thread_tracking: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.enable_pipelined_write: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.unordered_write: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.row_cache: None 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.wal_filter: None 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.allow_ingest_behind: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.two_write_queues: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.manual_wal_flush: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.wal_compression: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.atomic_flush: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-06T22:26:27.467 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.log_readahead_size: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.best_efforts_recovery: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.allow_data_in_errors: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.db_host_id: __hostname__ 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_background_jobs: 2 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_background_compactions: -1 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_subcompactions: 1 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_total_wal_size: 0 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_open_files: -1 2026-03-06T22:26:27.467 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bytes_per_sync: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: 
Options.strict_bytes_per_sync: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_readahead_size: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_background_flushes: -1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Compression algorithms supported: 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: kZSTD supported: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: kXpressCompression supported: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: kBZip2Compression supported: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: kLZ4Compression supported: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: kZlibCompression supported: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: kLZ4HCCompression supported: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: kSnappyCompression supported: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm06/store.db/MANIFEST-000005 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.merge_operator: 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_filter: None 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_filter_factory: None 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.sst_partitioner_factory: None 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: table_factory options: 
flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55e7f37541a0) 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: cache_index_and_filter_blocks: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: pin_top_level_index_and_filter: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: index_type: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: data_block_index_type: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: index_shortening: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: checksum: 4 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: no_block_cache: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_cache: 0x55e7f37791f0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_cache_name: BinnedLRUCache 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_cache_options: 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: capacity : 536870912 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: num_shard_bits : 4 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: strict_capacity_limit : 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: high_pri_pool_ratio: 0.000 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_cache_compressed: (nil) 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: persistent_cache: (nil) 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_size: 4096 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_size_deviation: 10 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_restart_interval: 16 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: index_block_restart_interval: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: metadata_block_size: 4096 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: partition_filters: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: use_delta_encoding: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: filter_policy: bloomfilter 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: whole_key_filtering: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: verify_compression: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: read_amp_bytes_per_bit: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: format_version: 5 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: enable_index_compression: 1 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: block_align: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: max_auto_readahead_size: 262144 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: prepopulate_block_cache: 0 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: initial_auto_readahead_size: 8192 2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout: num_file_reads_for_auto_readahead: 2 
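The dump above is ceph-mon's startup listing of every RocksDB tunable, one `rocksdb: Options.<name>: <value>` journal line each, followed by the block-based-table sub-options. When comparing two runs it can help to pull those pairs into a dict; a minimal sketch, assuming plain journal text on stdin (the helper below is illustrative, not a teuthology or Ceph API):

```python
# Sketch: collect "rocksdb: Options.<name>: <value>" pairs from a ceph-mon
# journal dump so two runs' effective options can be diffed.
import re
import sys

OPT_RE = re.compile(r'rocksdb: Options\.([A-Za-z0-9_.\[\]]+)\s*:\s*(.*)$')

def rocksdb_options(lines):
    """Return {option: value}; a later duplicate (per column family) wins."""
    opts = {}
    for line in lines:
        m = OPT_RE.search(line)
        if m:
            opts[m.group(1)] = m.group(2).strip()
    return opts

if __name__ == '__main__':
    opts = rocksdb_options(sys.stdin)
    for key in sorted(opts):
        print(f'{key} = {opts[key]}')
```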
2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.write_buffer_size: 33554432
2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_write_buffer_number: 2
2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compression: NoCompression
2026-03-06T22:26:27.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bottommost_compression: Disabled
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.prefix_extractor: nullptr
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.num_levels: 7
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bottommost_compression_opts.level: 32767
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bottommost_compression_opts.enabled: false
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compression_opts.window_bits: -14
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compression_opts.level: 32767
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compression_opts.strategy: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compression_opts.parallel_threads: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compression_opts.enabled: false
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.level0_stop_writes_trigger: 36
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.target_file_size_base: 67108864
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.target_file_size_multiplier: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_bytes_for_level_base: 268435456
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_compaction_bytes: 1677721600
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.arena_block_size: 1048576
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.disable_auto_compactions: 0
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-03-06T22:26:27.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.inplace_update_support: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.inplace_update_num_locks: 10000
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.memtable_whole_key_filtering: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.memtable_huge_page_size: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.bloom_locality: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.max_successive_merges: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.optimize_filters_for_hits: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.paranoid_file_checks: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.force_consistency_checks: 1
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.report_bg_io_stats: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.ttl: 2592000
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.periodic_compaction_seconds: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.preclude_last_level_data_seconds: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.preserve_internal_time_seconds: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.enable_blob_files: false
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.min_blob_size: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.blob_file_size: 268435456
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.blob_compression_type: NoCompression
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.enable_blob_garbage_collection: false
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.blob_compaction_readahead_size: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.blob_file_starting_level: 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm06/store.db/MANIFEST-000005 succeeded,manifest_file_number is 5, next_file_number is 7, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
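The "Recovered from manifest file ... succeeded" line above packs the store's recovery counters into one sentence. A small sketch for extracting them, with the regex keyed to that exact wording (`recovery_counters` is a hypothetical helper, not Ceph code):

```python
# Sketch: parse RocksDB's manifest-recovery summary line into integers.
import re

RECOVERY_RE = re.compile(
    r'manifest_file_number is (\d+), next_file_number is (\d+), '
    r'last_sequence is (\d+), log_number is (\d+)')

def recovery_counters(line):
    m = RECOVERY_RE.search(line)
    if not m:
        raise ValueError('no recovery summary found')
    keys = ('manifest_file_number', 'next_file_number',
            'last_sequence', 'log_number')
    return dict(zip(keys, map(int, m.groups())))

# For the line above: {'manifest_file_number': 5, 'next_file_number': 7,
#                      'last_sequence': 0, 'log_number': 0}
```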
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 15357529-794e-49f6-af1e-5f3628731b44
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772832387213973, "job": 1, "event": "recovery_started", "wal_files": [4]}
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #4 mode 2
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772832387223213, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1643, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 1, "largest_seqno": 5, "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1772832387, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "15357529-794e-49f6-af1e-5f3628731b44", "db_session_id": "Z5AWG95NYFO0OHYF6B7K", "orig_file_number": 8, "seqno_to_time_mapping": "N/A"}}
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772832387223312, "job": 1, "event": "recovery_finished"}
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [db/version_set.cc:5047] Creating manifest 10
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm06/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55e7f377ae00
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: DB pointer 0x55e7f3888000
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06 does not exist in monmap, will attempt to join an existing cluster
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: using public_addr v2:192.168.123.106:0/0 -> [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0]
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: starting mon.vm06 rank -1 at public addrs [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] at bind addrs [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon_data /var/lib/ceph/mon/ceph-vm06 fsid c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(???) e0 preinit fsid c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ** DB Stats **
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-06T22:26:27.470 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ** Compaction Stats [default] **
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: L0 1/0 1.60 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.2 0.01 0.00 1 0.009 0 0 0.0 0.0
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Sum 1/0 1.60 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.2 0.01 0.00 1 0.009 0 0 0.0 0.0
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.2 0.01 0.00 1 0.009 0 0 0.0 0.0
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ** Compaction Stats [default] **
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.2 0.01 0.00 1 0.009 0 0 0.0 0.0
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Cumulative compaction: 0.00 GB write, 0.08 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Interval compaction: 0.00 GB write, 0.08 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Block cache BinnedLRUCache@0x55e7f37791f0#2 capacity: 512.00 MB usage: 0.86 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 7e-06 secs_since: 0
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: Block cache entry stats(count,size,portion): DataBlock(1,0.64 KB,0.00012219%) FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.11 KB,2.08616e-05%) Misc(1,0.00 KB,0%)
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).mds e1 new map
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).mds e1 print_map
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: e1
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: btime 2026-03-06T21:24:40:685056+0000
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: enable_multiple, ever_enabled_multiple: 1,1
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes}
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: legacy client fscid: -1
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout: No filesystems configured
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e5 e5: 0 total, 0 up, 0 in
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e5 crush map has features 3314932999778484224, adjusting msgr requires
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/2447447370' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3195802147' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/4072808312' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3331887938' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
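Alongside the human-readable lines, RocksDB also emits machine-readable `EVENT_LOG_v1` entries (recovery_started, table_file_creation, recovery_finished in the recovery sequence above); the payload after the tag is plain JSON, so json.loads can decode it once the prefix is stripped. A sketch under that assumption (not a teuthology utility):

```python
# Sketch: decode RocksDB EVENT_LOG_v1 JSON payloads out of journal lines.
import json
import re

EVENT_RE = re.compile(r'EVENT_LOG_v1 (\{.*\})')

def rocksdb_events(lines):
    """Yield each EVENT_LOG_v1 entry as a dict."""
    for line in lines:
        m = EVENT_RE.search(line)
        if m:
            yield json.loads(m.group(1))

# e.g. [e['event'] for e in rocksdb_events(log_lines)]
# -> ['recovery_started', 'table_file_creation', 'recovery_finished']
```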
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/1790765301' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/1256759528' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Active manager daemon vm01.mrlynj restarted
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Activating manager daemon vm01.mrlynj
2026-03-06T22:26:27.471 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: osdmap e5: 0 total, 0 up, 0 in
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mgrmap e15: vm01.mrlynj(active, starting, since 0.0102076s)
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm01"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr metadata", "who": "vm01.mrlynj", "id": "vm01.mrlynj"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Manager daemon vm01.mrlynj is now available
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.mrlynj/mirror_snapshot_schedule"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.mrlynj/trash_purge_schedule"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mgrmap e16: vm01.mrlynj(active, since 1.02922s)
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: [06/Mar/2026:21:26:14] ENGINE Bus STARTING
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: [06/Mar/2026:21:26:14] ENGINE Serving on http://192.168.123.101:8765
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3658842093' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: [06/Mar/2026:21:26:15] ENGINE Serving on https://192.168.123.101:7150
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: [06/Mar/2026:21:26:15] ENGINE Bus STARTED
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: [06/Mar/2026:21:26:15] ENGINE Client ('192.168.123.101', 60066) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mgrmap e17: vm01.mrlynj(active, since 2s)
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3381680147' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Updating vm01:/etc/ceph/ceph.conf
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Updating vm06:/etc/ceph/ceph.conf
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Updating vm06:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.conf
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Updating vm01:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.conf
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Updating vm06:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.client.admin.keyring
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Updating vm01:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.client.admin.keyring
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
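The mgrmap lines above (e15 starting, e16/e17 active) are the usual signal that the active mgr came up before cephadm starts distributing configs and keyrings. A throwaway sketch for pulling that progression out of a log; `mgr_states` is a hypothetical helper that captures only the first state token inside the parentheses:

```python
# Sketch: follow mgrmap epochs to watch the active mgr's reported state.
import re

MGRMAP_RE = re.compile(r'mgrmap e(\d+): (\S+?)\((\w+)')

def mgr_states(lines):
    """Yield (epoch, daemon, first_state_token) per mgrmap announcement."""
    for line in lines:
        m = MGRMAP_RE.search(line)
        if m:
            epoch, daemon, state = m.groups()
            yield int(epoch), daemon, state

# For this excerpt: (15, 'vm01.mrlynj', 'active'), (16, ...), (17, ...).
```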
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.472 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Deploying daemon ceph-exporter.vm06 on vm06
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3061069879' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Deploying daemon crash.vm06 on vm06
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3588285611' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Deploying daemon node-exporter.vm06 on vm06
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3657758832' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm06.awlziz", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm06.awlziz", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Deploying daemon mgr.vm06.awlziz on vm06
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/4025801457' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: Deploying daemon mon.vm06 on vm06
2026-03-06T22:26:27.473 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:27 vm06 ceph-mon[52574]: mon.vm06@-1(synchronizing).paxosservice(auth 1..8) refresh upgraded, format 0 -> 3
2026-03-06T22:26:32.782 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm01"}]: dispatch
2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: mon.vm01 calling monitor election
2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: mon.vm06 calling monitor election
2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-06T22:26:32.783
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: mon.vm01 is new leader, mons vm01,vm06 in quorum (ranks 0,1) 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: monmap epoch 2 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: fsid c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: last_changed 2026-03-06T21:26:27.291480+0000 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: created 2026-03-06T21:24:38.888443+0000 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: min_mon_release 19 (squid) 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: election_strategy: 1 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.vm01 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: 1: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: fsmap 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: osdmap e5: 0 total, 0 up, 0 in 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: mgrmap e17: vm01.mrlynj(active, since 19s) 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: overall HEALTH_OK 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:26:32.783 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:32 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:26:33.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm01"}]: dispatch 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:26:33.121 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: mon.vm01 calling monitor election 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: mon.vm06 calling monitor election 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: mon.vm01 is new leader, mons vm01,vm06 in quorum (ranks 0,1) 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: monmap epoch 2 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: fsid c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: last_changed 2026-03-06T21:26:27.291480+0000 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: created 2026-03-06T21:24:38.888443+0000 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: min_mon_release 19 (squid) 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: election_strategy: 1 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.vm01 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: 1: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: fsmap 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: osdmap e5: 0 total, 0 up, 0 in 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: mgrmap e17: vm01.mrlynj(active, since 19s) 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: overall HEALTH_OK 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 
06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:26:33.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:32 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:26:33.757 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-06T22:26:33.757 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":2,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","modified":"2026-03-06T21:26:27.291480Z","created":"2026-03-06T21:24:38.888443Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-06T22:26:33.757 INFO:teuthology.orchestra.run.vm06.stderr:dumped monmap epoch 2 2026-03-06T22:26:33.826 INFO:tasks.cephadm:Generating final ceph.conf file... 
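The `mon dump` JSON above is the monmap the harness confirms before generating the final ceph.conf: epoch 2, two mons (vm01, vm06), quorum ranks [0,1]. A minimal sketch of that kind of check in Python; the helper name and the direct local `ceph` CLI call are illustrative assumptions, not teuthology's actual code (the harness drives the command through `cephadm shell`):

    import json
    import subprocess

    def mons_in_quorum():
        # Same command seen in the log: `ceph mon dump -f json`.
        # Field names ("mons", "quorum", "rank", "name") match the dump above.
        out = subprocess.check_output(["ceph", "mon", "dump", "-f", "json"])
        monmap = json.loads(out)
        by_rank = {m["rank"]: m["name"] for m in monmap["mons"]}
        return [by_rank[r] for r in monmap["quorum"]]

    # For the dump above this returns ['vm01', 'vm06'] (quorum ranks 0 and 1).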
2026-03-06T22:26:33.827 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph config generate-minimal-conf
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: Updating vm01:/etc/ceph/ceph.conf
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: Updating vm06:/etc/ceph/ceph.conf
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: Updating vm01:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.conf
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: Updating vm06:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.conf
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm01.mrlynj", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:34.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:33 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/4174081122' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: Updating vm01:/etc/ceph/ceph.conf
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: Updating vm06:/etc/ceph/ceph.conf
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: Updating vm01:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.conf
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: Updating vm06:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/config/ceph.conf
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm01.mrlynj", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:34.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:33 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/4174081122' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T22:26:34.206 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:26:34.581 INFO:teuthology.orchestra.run.vm01.stdout:# minimal ceph.conf for c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:26:34.581 INFO:teuthology.orchestra.run.vm01.stdout:[global]
2026-03-06T22:26:34.581 INFO:teuthology.orchestra.run.vm01.stdout: fsid = c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:26:34.581 INFO:teuthology.orchestra.run.vm01.stdout: mon_host = [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0]
2026-03-06T22:26:34.647 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring...
2026-03-06T22:26:34.647 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-06T22:26:34.647 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.conf
2026-03-06T22:26:34.679 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-06T22:26:34.679 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-06T22:26:34.744 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-06T22:26:34.744 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.conf
2026-03-06T22:26:34.771 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-06T22:26:34.771 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-06T22:26:34.842 INFO:tasks.cephadm:Deploying OSDs...
2026-03-06T22:26:34.842 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-06T22:26:34.842 DEBUG:teuthology.orchestra.run.vm01:> dd if=/scratch_devs of=/dev/stdout
2026-03-06T22:26:34.865 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T22:26:34.865 DEBUG:teuthology.orchestra.run.vm01:> ls /dev/[sv]d?
2026-03-06T22:26:34.941 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vda
2026-03-06T22:26:34.941 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vdb
2026-03-06T22:26:34.941 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vdc
2026-03-06T22:26:34.941 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vdd
2026-03-06T22:26:34.941 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vde
2026-03-06T22:26:34.941 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-06T22:26:34.941 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-06T22:26:34.941 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vdb
2026-03-06T22:26:35.009 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vdb
2026-03-06T22:26:35.009 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-06T22:26:35.009 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 221 Links: 1 Device type: fc,10
2026-03-06T22:26:35.009 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-06T22:26:35.009 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-06T22:26:35.009 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-06 22:25:37.560948890 +0100
2026-03-06T22:26:35.009 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-06 22:23:53.912758940 +0100
2026-03-06T22:26:35.009 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-06 22:23:53.912758940 +0100
2026-03-06T22:26:35.010 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-06 22:20:48.283000000 +0100
2026-03-06T22:26:35.010 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-06T22:26:35.082 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-03-06T22:26:35.082 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-03-06T22:26:35.082 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000198522 s, 2.6 MB/s
2026-03-06T22:26:35.084 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-06T22:26:35.143 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vdc
2026-03-06T22:26:35.204 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vdc
2026-03-06T22:26:35.204 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-06T22:26:35.204 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 222 Links: 1 Device type: fc,20
2026-03-06T22:26:35.204 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-06T22:26:35.204 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-06T22:26:35.204 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-06 22:25:37.593948893 +0100
2026-03-06T22:26:35.204 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-06 22:23:53.914758940 +0100
2026-03-06T22:26:35.204 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-06 22:23:53.914758940 +0100
2026-03-06T22:26:35.204 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-06 22:20:48.290000000 +0100
2026-03-06T22:26:35.204 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-06T22:26:35.211 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: Reconfiguring mon.vm01 (unknown last config time)...
2026-03-06T22:26:35.211 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: Reconfiguring daemon mon.vm01 on vm01
2026-03-06T22:26:35.211 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:35.211 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: Reconfiguring mgr.vm01.mrlynj (unknown last config time)...
2026-03-06T22:26:35.211 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: Reconfiguring daemon mgr.vm01.mrlynj on vm01
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: Reconfiguring ceph-exporter.vm01 (monmap changed)...
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: Reconfiguring daemon ceph-exporter.vm01 on vm01
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/974895785' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.212 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:34 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.274 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: Reconfiguring mon.vm01 (unknown last config time)...
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: Reconfiguring daemon mon.vm01 on vm01
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: Reconfiguring mgr.vm01.mrlynj (unknown last config time)...
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: Reconfiguring daemon mgr.vm01.mrlynj on vm01
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: Reconfiguring ceph-exporter.vm01 (monmap changed)...
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: Reconfiguring daemon ceph-exporter.vm01 on vm01
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/974895785' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.275 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:34 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:35.277 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-03-06T22:26:35.277 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-03-06T22:26:35.277 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000204654 s, 2.5 MB/s
2026-03-06T22:26:35.278 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
2026-03-06T22:26:35.352 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vdd
2026-03-06T22:26:35.417 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vdd
2026-03-06T22:26:35.418 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-06T22:26:35.418 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30
2026-03-06T22:26:35.418 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-06T22:26:35.418 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-06T22:26:35.418 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-06 22:25:37.639948897 +0100
2026-03-06T22:26:35.418 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-06 22:23:53.954758937 +0100
2026-03-06T22:26:35.418 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-06 22:23:53.954758937 +0100
2026-03-06T22:26:35.418 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-06 22:20:48.301000000 +0100
2026-03-06T22:26:35.418 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-06T22:26:35.488 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-03-06T22:26:35.488 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-03-06T22:26:35.488 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000162965 s, 3.1 MB/s
2026-03-06T22:26:35.489 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-06T22:26:35.559 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vde
2026-03-06T22:26:35.628 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vde
2026-03-06T22:26:35.628 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-06T22:26:35.628 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40
2026-03-06T22:26:35.628 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-06T22:26:35.628 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-06T22:26:35.628 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-06 22:25:37.677948900 +0100
2026-03-06T22:26:35.628 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-06 22:23:53.933758939 +0100
2026-03-06T22:26:35.628 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-06 22:23:53.933758939 +0100
2026-03-06T22:26:35.628 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-06 22:20:48.376000000 +0100
2026-03-06T22:26:35.628 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-06T22:26:35.695 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-03-06T22:26:35.695 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-03-06T22:26:35.695 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000120234 s, 4.3 MB/s
2026-03-06T22:26:35.696 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vde
2026-03-06T22:26:35.754 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-06T22:26:35.754 DEBUG:teuthology.orchestra.run.vm06:> dd if=/scratch_devs of=/dev/stdout
2026-03-06T22:26:35.774 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T22:26:35.774 DEBUG:teuthology.orchestra.run.vm06:> ls /dev/[sv]d?
2026-03-06T22:26:35.832 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vda
2026-03-06T22:26:35.832 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdb
2026-03-06T22:26:35.832 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdc
2026-03-06T22:26:35.832 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdd
2026-03-06T22:26:35.832 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vde
2026-03-06T22:26:35.833 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-06T22:26:35.833 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-06T22:26:35.833 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdb
2026-03-06T22:26:35.893 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdb
2026-03-06T22:26:35.909 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-06T22:26:35.909 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 223 Links: 1 Device type: fc,10
2026-03-06T22:26:35.909 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-06T22:26:35.909 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-06T22:26:35.909 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-06 22:26:17.826673656 +0100
2026-03-06T22:26:35.909 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-06 22:23:52.955313168 +0100
2026-03-06T22:26:35.909 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-06 22:23:52.955313168 +0100
2026-03-06T22:26:35.909 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-06 22:21:31.312000000 +0100
2026-03-06T22:26:35.909 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-06T22:26:35.962 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-06T22:26:35.962 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-06T22:26:35.963 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000174566 s, 2.9 MB/s
2026-03-06T22:26:35.964 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-06T22:26:36.027 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdc
2026-03-06T22:26:36.089 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdc
2026-03-06T22:26:36.089 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-06T22:26:36.090 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20
2026-03-06T22:26:36.090 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-06T22:26:36.090 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-06T22:26:36.090 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-06 22:26:17.859673470 +0100
2026-03-06T22:26:36.090 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-06 22:23:52.954313168 +0100
2026-03-06T22:26:36.090 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-06 22:23:52.954313168 +0100
2026-03-06T22:26:36.090 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-06 22:21:31.328000000 +0100
2026-03-06T22:26:36.090 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-06T22:26:36.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:36 vm01 ceph-mon[46942]: Reconfiguring crash.vm01 (monmap changed)...
2026-03-06T22:26:36.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:36 vm01 ceph-mon[46942]: Reconfiguring daemon crash.vm01 on vm01
2026-03-06T22:26:36.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:36 vm01 ceph-mon[46942]: Reconfiguring alertmanager.vm01 (dependencies changed)...
2026-03-06T22:26:36.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:36 vm01 ceph-mon[46942]: Reconfiguring daemon alertmanager.vm01 on vm01
2026-03-06T22:26:36.160 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-06T22:26:36.160 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-06T22:26:36.160 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000199822 s, 2.6 MB/s
2026-03-06T22:26:36.161 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
2026-03-06T22:26:36.226 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdd
2026-03-06T22:26:36.287 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdd
2026-03-06T22:26:36.288 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-06T22:26:36.288 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30
2026-03-06T22:26:36.288 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-06T22:26:36.288 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-06T22:26:36.288 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-06 22:26:17.886673318 +0100
2026-03-06T22:26:36.288 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-06 22:23:52.930313168 +0100
2026-03-06T22:26:36.288 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-06 22:23:52.930313168 +0100
2026-03-06T22:26:36.288 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-06 22:21:31.332000000 +0100
2026-03-06T22:26:36.288 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-06T22:26:36.354 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:36 vm06 ceph-mon[52574]: Reconfiguring crash.vm01 (monmap changed)...
2026-03-06T22:26:36.355 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:36 vm06 ceph-mon[52574]: Reconfiguring daemon crash.vm01 on vm01
2026-03-06T22:26:36.355 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:36 vm06 ceph-mon[52574]: Reconfiguring alertmanager.vm01 (dependencies changed)...
2026-03-06T22:26:36.355 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:36 vm06 ceph-mon[52574]: Reconfiguring daemon alertmanager.vm01 on vm01
2026-03-06T22:26:36.357 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-06T22:26:36.357 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-06T22:26:36.357 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000172342 s, 3.0 MB/s
2026-03-06T22:26:36.358 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-06T22:26:36.421 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vde
2026-03-06T22:26:36.483 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vde
2026-03-06T22:26:36.483 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-06T22:26:36.483 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40
2026-03-06T22:26:36.483 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-06T22:26:36.483 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-06T22:26:36.483 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-06 22:26:17.917673143 +0100
2026-03-06T22:26:36.483 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-06 22:23:52.985313168 +0100
2026-03-06T22:26:36.483 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-06 22:23:52.985313168 +0100
2026-03-06T22:26:36.483 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-06 22:21:31.336000000 +0100
2026-03-06T22:26:36.484 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-06T22:26:36.551 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-06T22:26:36.551 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-06T22:26:36.551 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000203731 s, 2.5 MB/s
2026-03-06T22:26:36.552 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vde
2026-03-06T22:26:36.614 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch apply osd --all-available-devices
2026-03-06T22:26:37.000 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm06/config
2026-03-06T22:26:37.011 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:37 vm06 ceph-mon[52574]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:37.011 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:37 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:37.011 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:37 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:37.011 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:37 vm06 ceph-mon[52574]: Standby manager daemon vm06.awlziz started
2026-03-06T22:26:37.011 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:37 vm06 ceph-mon[52574]: from='mgr.? 192.168.123.106:0/1209823228' entity='mgr.vm06.awlziz' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm06.awlziz/crt"}]: dispatch
2026-03-06T22:26:37.011 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:37 vm06 ceph-mon[52574]: from='mgr.? 192.168.123.106:0/1209823228' entity='mgr.vm06.awlziz' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-06T22:26:37.011 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:37 vm06 ceph-mon[52574]: from='mgr.? 192.168.123.106:0/1209823228' entity='mgr.vm06.awlziz' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm06.awlziz/key"}]: dispatch
2026-03-06T22:26:37.011 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:37 vm06 ceph-mon[52574]: from='mgr.? 192.168.123.106:0/1209823228' entity='mgr.vm06.awlziz' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-06T22:26:37.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:37 vm01 ceph-mon[46942]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:37.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:37 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:37.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:37 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:37.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:37 vm01 ceph-mon[46942]: Standby manager daemon vm06.awlziz started
2026-03-06T22:26:37.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:37 vm01 ceph-mon[46942]: from='mgr.? 192.168.123.106:0/1209823228' entity='mgr.vm06.awlziz' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm06.awlziz/crt"}]: dispatch
2026-03-06T22:26:37.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:37 vm01 ceph-mon[46942]: from='mgr.? 192.168.123.106:0/1209823228' entity='mgr.vm06.awlziz' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-06T22:26:37.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:37 vm01 ceph-mon[46942]: from='mgr.? 192.168.123.106:0/1209823228' entity='mgr.vm06.awlziz' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm06.awlziz/key"}]: dispatch
2026-03-06T22:26:37.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:37 vm01 ceph-mon[46942]: from='mgr.? 192.168.123.106:0/1209823228' entity='mgr.vm06.awlziz' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-06T22:26:37.381 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled osd.all-available-devices update...
2026-03-06T22:26:37.487 INFO:tasks.cephadm:Waiting for 8 OSDs to come up...
2026-03-06T22:26:37.487 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:26:38.030 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:38 vm01 ceph-mon[46942]: Reconfiguring grafana.vm01 (dependencies changed)...
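Each scratch device above (/dev/vdb through /dev/vde, on both hosts, after /dev/vda is dropped as the root device) is vetted with the same three-step probe: `stat` to confirm a block special file, a one-sector `dd` read to confirm it is readable, and a mount check that ignores devtmpfs. A sketch of that probe as it would look run locally in Python; the helper name is hypothetical, and the real checks are driven over SSH by the harness rather than executed in-process:

    import os
    import stat as statmod
    import subprocess

    def usable_scratch_dev(dev):
        # stat: the device must be a block special file, as in the output above.
        if not statmod.S_ISBLK(os.stat(dev).st_mode):
            return False
        # readable: one 512-byte sector, mirroring
        # `sudo dd if=<dev> of=/dev/null count=1`.
        subprocess.check_call(["sudo", "dd", f"if={dev}", "of=/dev/null", "count=1"])
        # not mounted: mirrors `! mount | grep -v devtmpfs | grep -q <dev>`.
        mounts = subprocess.check_output(["mount"]).decode()
        return not any(dev in line for line in mounts.splitlines()
                       if "devtmpfs" not in line)

    devs = [d for d in ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
            if usable_scratch_dev(d)]

Only devices passing all three checks remain candidates for `ceph orch apply osd --all-available-devices`, which is issued immediately after the vm06 probe completes.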
2026-03-06T22:26:38.030 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:38 vm01 ceph-mon[46942]: Reconfiguring daemon grafana.vm01 on vm01
2026-03-06T22:26:38.030 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:38 vm01 ceph-mon[46942]: mgrmap e18: vm01.mrlynj(active, since 23s), standbys: vm06.awlziz
2026-03-06T22:26:38.030 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:38 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr metadata", "who": "vm06.awlziz", "id": "vm06.awlziz"}]: dispatch
2026-03-06T22:26:38.030 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:38 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:38.030 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:38 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:38.030 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:38 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:38.063 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:26:38.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:38 vm06 ceph-mon[52574]: Reconfiguring grafana.vm01 (dependencies changed)...
2026-03-06T22:26:38.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:38 vm06 ceph-mon[52574]: Reconfiguring daemon grafana.vm01 on vm01
2026-03-06T22:26:38.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:38 vm06 ceph-mon[52574]: mgrmap e18: vm01.mrlynj(active, since 23s), standbys: vm06.awlziz
2026-03-06T22:26:38.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:38 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr metadata", "who": "vm06.awlziz", "id": "vm06.awlziz"}]: dispatch
2026-03-06T22:26:38.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:38 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:38.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:38 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:38.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:38 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:38.517 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:26:38.612 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: Reconfiguring prometheus.vm01 (dependencies changed)...
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: from='client.24099 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: Marking host: vm01 for OSDSpec preview refresh.
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: Marking host: vm06 for OSDSpec preview refresh.
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: Saving service osd.all-available-devices spec with placement *
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: Reconfiguring daemon prometheus.vm01 on vm01
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2845404898' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-06T22:26:39.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:39 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: Reconfiguring prometheus.vm01 (dependencies changed)...
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: from='client.24099 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: Marking host: vm01 for OSDSpec preview refresh.
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: Marking host: vm06 for OSDSpec preview refresh.
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: Saving service osd.all-available-devices spec with placement *
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: Reconfiguring daemon prometheus.vm01 on vm01
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2845404898' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-06T22:26:39.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:39 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:39.612 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:26:39.949 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:26:40.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: Reconfiguring ceph-exporter.vm06 (monmap changed)...
2026-03-06T22:26:40.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: Reconfiguring daemon ceph-exporter.vm06 on vm06
2026-03-06T22:26:40.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:40.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:40.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-06T22:26:40.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:40.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:40.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:40.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm06.awlziz", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-06T22:26:40.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-06T22:26:40.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:40 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:40.317 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: Reconfiguring ceph-exporter.vm06 (monmap changed)...
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: Reconfiguring daemon ceph-exporter.vm06 on vm06
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm06.awlziz", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-06T22:26:40.406 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:40 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:40.443 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: Reconfiguring crash.vm06 (monmap changed)...
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: Reconfiguring daemon crash.vm06 on vm06
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: Reconfiguring mgr.vm06.awlziz (monmap changed)...
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: Reconfiguring daemon mgr.vm06.awlziz on vm06
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: Reconfiguring mon.vm06 (monmap changed)...
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: Reconfiguring daemon mon.vm06 on vm06
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2606105606' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm01.local:3000"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm01.local:3000"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm01.local:9095"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm01.local:9095"}]: dispatch
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:26:41.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:41 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: Reconfiguring crash.vm06 (monmap changed)...
2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: Reconfiguring daemon crash.vm06 on vm06
2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: Reconfiguring mgr.vm06.awlziz (monmap changed)...
2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: Reconfiguring daemon mgr.vm06.awlziz on vm06 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: Reconfiguring mon.vm06 (monmap changed)... 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: Reconfiguring daemon mon.vm06 on vm06 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2606105606' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm01.local:3000"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm01.local:3000"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm01.local:9095"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm01.local:9095"}]: dispatch 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:41.329 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:41 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:26:41.444 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json 2026-03-06T22:26:41.823 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:26:42.198 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:26:42.366 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-06T22:26:43.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:42 vm01 ceph-mon[46942]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:43.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:42 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:43.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:42 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:43.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:42 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:43.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:42 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:43.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:42 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/2075521827' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:43.367 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json 2026-03-06T22:26:43.370 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:42 vm06 ceph-mon[52574]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:43.370 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:42 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:43.370 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:42 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:43.370 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:42 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:43.370 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:42 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:43.370 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:42 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2075521827' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:43.764 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-06T22:26:44.120 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:26:44.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:43 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:26:44.166 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:26:44.253 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-06T22:26:44.404 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:26:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:43 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/1697071449' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/257986324' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a7899fe4-5f7a-4604-8760-1e2f968c5695"}]: dispatch 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a7899fe4-5f7a-4604-8760-1e2f968c5695"}]: dispatch 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a7899fe4-5f7a-4604-8760-1e2f968c5695"}]': finished 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: osdmap e6: 1 total, 0 up, 1 in 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3035562826' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5d21bba8-8bc4-4ec1-b646-fc50b522c81d"}]: dispatch 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3035562826' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5d21bba8-8bc4-4ec1-b646-fc50b522c81d"}]': finished 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: osdmap e7: 2 total, 0 up, 2 in 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T22:26:44.989 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:44 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/1697071449' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/257986324' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a7899fe4-5f7a-4604-8760-1e2f968c5695"}]: dispatch 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a7899fe4-5f7a-4604-8760-1e2f968c5695"}]: dispatch 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a7899fe4-5f7a-4604-8760-1e2f968c5695"}]': finished 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: osdmap e6: 1 total, 0 up, 1 in 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3035562826' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5d21bba8-8bc4-4ec1-b646-fc50b522c81d"}]: dispatch 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3035562826' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5d21bba8-8bc4-4ec1-b646-fc50b522c81d"}]': finished 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: osdmap e7: 2 total, 0 up, 2 in 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T22:26:45.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:44 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T22:26:45.254 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json 2026-03-06T22:26:45.602 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:26:45.954 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:26:46.037 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1772832404,"num_remapped_pgs":0} 2026-03-06T22:26:46.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:45 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/1808167223' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T22:26:46.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:45 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/4108049254' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T22:26:46.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:45 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/4064935177' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:45 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/1808167223' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T22:26:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:45 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/4108049254' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T22:26:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:45 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/4064935177' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:47.038 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json 2026-03-06T22:26:47.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:47 vm01 ceph-mon[46942]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:47.370 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:26:47.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:47 vm06 ceph-mon[52574]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:47.786 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:26:48.266 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1772832404,"num_remapped_pgs":0} 2026-03-06T22:26:48.324 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:48 vm06 ceph-mon[52574]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:48.324 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:48 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/1392149970' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:48.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:48 vm01 ceph-mon[46942]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:48.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:48 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1392149970' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:49.267 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2085598171' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d5edc145-cbe2-4519-bb3f-fb428e3e820f"}]: dispatch 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/2085598171' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d5edc145-cbe2-4519-bb3f-fb428e3e820f"}]': finished 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: osdmap e8: 3 total, 0 up, 3 in 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/2320928042' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3ad582a8-03b4-42cf-844c-b79d2701f629"}]: dispatch 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3ad582a8-03b4-42cf-844c-b79d2701f629"}]: dispatch 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3ad582a8-03b4-42cf-844c-b79d2701f629"}]': finished 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: osdmap e9: 4 total, 0 up, 4 in 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:26:49.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:49 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2558874036' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T22:26:49.634 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2085598171' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d5edc145-cbe2-4519-bb3f-fb428e3e820f"}]: dispatch 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='client.? 
192.168.123.101:0/2085598171' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d5edc145-cbe2-4519-bb3f-fb428e3e820f"}]': finished 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: osdmap e8: 3 total, 0 up, 3 in 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/2320928042' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3ad582a8-03b4-42cf-844c-b79d2701f629"}]: dispatch 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3ad582a8-03b4-42cf-844c-b79d2701f629"}]: dispatch 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3ad582a8-03b4-42cf-844c-b79d2701f629"}]': finished 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: osdmap e9: 4 total, 0 up, 4 in 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T22:26:49.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T22:26:49.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:26:49.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:26:49.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:49 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2558874036' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T22:26:49.980 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:26:50.048 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1772832408,"num_remapped_pgs":0} 2026-03-06T22:26:50.255 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:50 vm01 ceph-mon[46942]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:50.255 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:50 vm01 ceph-mon[46942]: from='client.? 
192.168.123.106:0/3935436266' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T22:26:50.255 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:50 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2410447363' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:50.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:50 vm06 ceph-mon[52574]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:50.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:50 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3935436266' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T22:26:50.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:50 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2410447363' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:51.049 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json 2026-03-06T22:26:51.371 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:26:51.693 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:26:51.861 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1772832408,"num_remapped_pgs":0} 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/327793424' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3590212737' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b83f2bbe-cfb7-4b75-ac34-55fca55be32a"}]: dispatch 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b83f2bbe-cfb7-4b75-ac34-55fca55be32a"}]: dispatch 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b83f2bbe-cfb7-4b75-ac34-55fca55be32a"}]': finished 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: osdmap e10: 5 total, 0 up, 5 in 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:26:52.273 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:52 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/327793424' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/3590212737' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b83f2bbe-cfb7-4b75-ac34-55fca55be32a"}]: dispatch 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b83f2bbe-cfb7-4b75-ac34-55fca55be32a"}]: dispatch 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b83f2bbe-cfb7-4b75-ac34-55fca55be32a"}]': finished 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: osdmap e10: 5 total, 0 up, 5 in 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:26:52.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:52 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T22:26:52.862 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json 2026-03-06T22:26:53.196 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1840584672' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "507447ec-8f5a-48a8-8611-bf4a66e8bbc1"}]: dispatch 2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/1840584672' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "507447ec-8f5a-48a8-8611-bf4a66e8bbc1"}]': finished
2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: osdmap e11: 6 total, 0 up, 6 in
2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/540848283' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T22:26:53.530 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:53 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1050211139' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T22:26:53.530 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:26:53.601 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1772832412,"num_remapped_pgs":0}
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/1840584672' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "507447ec-8f5a-48a8-8611-bf4a66e8bbc1"}]: dispatch
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/1840584672' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "507447ec-8f5a-48a8-8611-bf4a66e8bbc1"}]': finished
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: osdmap e11: 6 total, 0 up, 6 in
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/540848283' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T22:26:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:53 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/1050211139' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T22:26:54.602 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:26:54.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:54 vm01 ceph-mon[46942]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:54.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:54 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/791580239' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:54.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:54 vm06 ceph-mon[52574]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:54.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:54 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/791580239' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:54.954 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:26:55.295 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:26:55.365 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1772832412,"num_remapped_pgs":0}
2026-03-06T22:26:56.321 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/788624370' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/3335297158' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "91b15bb3-09a4-485e-92be-1d84ac40349c"}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "91b15bb3-09a4-485e-92be-1d84ac40349c"}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "91b15bb3-09a4-485e-92be-1d84ac40349c"}]': finished
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: osdmap e12: 7 total, 0 up, 7 in
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/3111491870' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T22:26:56.322 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:56 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2123829127' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0bc4acec-8f09-4d2b-9bf6-2fe0cd00c69c"}]: dispatch
2026-03-06T22:26:56.366 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/788624370' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3335297158' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "91b15bb3-09a4-485e-92be-1d84ac40349c"}]: dispatch
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "91b15bb3-09a4-485e-92be-1d84ac40349c"}]: dispatch
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "91b15bb3-09a4-485e-92be-1d84ac40349c"}]': finished
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: osdmap e12: 7 total, 0 up, 7 in
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
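[editor's note] The DEBUG/stdout pairs above are the harness polling `ceph osd stat -f json` through `cephadm shell` every couple of seconds until every OSD it created reports up (the JSON's num_osds/num_up_osds fields; the epoch climbs from 11 toward 18 below as OSDs register and boot). A minimal sketch of that loop in Python, assuming the exact command line from the DEBUG records; this is illustrative, not teuthology's actual implementation:

import json
import subprocess
import time

# Command copied verbatim from the DEBUG lines in this log.
CMD = [
    "sudo", "/home/ubuntu/cephtest/cephadm",
    "--image", "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5",
    "shell", "-c", "/etc/ceph/ceph.conf",
    "-k", "/etc/ceph/ceph.client.admin.keyring",
    "--fsid", "c76e688a-19a2-11f1-bdea-01160fc6f239",
    "--", "ceph", "osd", "stat", "-f", "json",
]

def wait_for_osds_up(expected, interval=2, timeout=900):
    """Poll `ceph osd stat -f json` until num_up_osds reaches `expected`."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        # Output looks like the stdout records in this log, e.g.
        # {"epoch":11,"num_osds":6,"num_up_osds":0,...}
        stat = json.loads(subprocess.check_output(CMD))
        if stat["num_up_osds"] >= expected:
            return stat
        time.sleep(interval)
    raise TimeoutError("OSDs did not come up within %ds" % timeout)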
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/3111491870' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T22:26:56.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:56 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2123829127' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0bc4acec-8f09-4d2b-9bf6-2fe0cd00c69c"}]: dispatch
2026-03-06T22:26:56.741 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:26:57.087 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:26:57.138 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0}
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0bc4acec-8f09-4d2b-9bf6-2fe0cd00c69c"}]: dispatch
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0bc4acec-8f09-4d2b-9bf6-2fe0cd00c69c"}]': finished
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: osdmap e13: 8 total, 0 up, 8 in
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3220225700' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T22:26:57.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:57 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3839463616' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0bc4acec-8f09-4d2b-9bf6-2fe0cd00c69c"}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0bc4acec-8f09-4d2b-9bf6-2fe0cd00c69c"}]': finished
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: osdmap e13: 8 total, 0 up, 8 in
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3220225700' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T22:26:57.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:57 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3839463616' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:58.138 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:26:58.480 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:26:58.509 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:58 vm01 ceph-mon[46942]: pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:58.509 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:58 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T22:26:58.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:58 vm06 ceph-mon[52574]: pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:26:58.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:58 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T22:26:58.859 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:26:58.933 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0}
2026-03-06T22:26:59.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:26:59 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3770091118' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:59.620 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:26:59 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3770091118' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:26:59.935 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:27:00.360 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:00.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:00 vm01 ceph-mon[46942]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:27:00.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:00 vm06 ceph-mon[52574]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:27:00.803 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:00.924 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0}
2026-03-06T22:27:01.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:01 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-06T22:27:01.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:01 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:01.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:01 vm01 ceph-mon[46942]: Deploying daemon osd.0 on vm06
2026-03-06T22:27:01.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:01 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-06T22:27:01.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:01 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:01.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:01 vm01 ceph-mon[46942]: Deploying daemon osd.1 on vm01
2026-03-06T22:27:01.370 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:01 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2986901611' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:01.593 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:01 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-06T22:27:01.593 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:01 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:01.593 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:01 vm06 ceph-mon[52574]: Deploying daemon osd.0 on vm06
2026-03-06T22:27:01.593 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:01 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-06T22:27:01.593 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:01 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:01.593 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:01 vm06 ceph-mon[52574]: Deploying daemon osd.1 on vm01
2026-03-06T22:27:01.593 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:01 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2986901611' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:01.925 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:27:02.389 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:02.446 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:02 vm01 ceph-mon[46942]: pgmap v25: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:27:02.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:02 vm06 ceph-mon[52574]: pgmap v25: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:27:02.764 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:02.837 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0}
2026-03-06T22:27:03.424 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:03 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1501881975' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:03.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:03 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/1501881975' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:03.837 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:27:04.221 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:04.652 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:04 vm06 ceph-mon[52574]: pgmap v26: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:27:04.652 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:04 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:04.652 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:04 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:04.652 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:04 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-06T22:27:04.652 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:04 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:04.652 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:04 vm06 ceph-mon[52574]: Deploying daemon osd.3 on vm06
2026-03-06T22:27:04.652 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:04 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:04.652 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:04 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:04.652 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:04 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-06T22:27:04.652 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:04 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:04.672 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:04.743 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0}
2026-03-06T22:27:04.764 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:04 vm01 ceph-mon[46942]: pgmap v26: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:27:04.764 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:04 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:04.764 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:04 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:04.764 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:04 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-06T22:27:04.764 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:04 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:04.764 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:04 vm01 ceph-mon[46942]: Deploying daemon osd.3 on vm06
2026-03-06T22:27:04.764 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:04 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:04.764 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:04 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:04.764 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:04 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-06T22:27:04.764 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:04 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:05.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:05 vm06 ceph-mon[52574]: Deploying daemon osd.2 on vm01
2026-03-06T22:27:05.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:05 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2340579567' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:05.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:05 vm06 ceph-mon[52574]: from='osd.0 [v2:192.168.123.106:6800/1059922460,v1:192.168.123.106:6801/1059922460]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-06T22:27:05.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:05 vm06 ceph-mon[52574]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-06T22:27:05.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:05 vm06 ceph-mon[52574]: from='osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-06T22:27:05.714 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:05 vm01 ceph-mon[46942]: Deploying daemon osd.2 on vm01
2026-03-06T22:27:05.714 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:05 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2340579567' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:05.714 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:05 vm01 ceph-mon[46942]: from='osd.0 [v2:192.168.123.106:6800/1059922460,v1:192.168.123.106:6801/1059922460]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-06T22:27:05.714 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:05 vm01 ceph-mon[46942]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-06T22:27:05.714 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:05 vm01 ceph-mon[46942]: from='osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-06T22:27:05.744 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:27:06.137 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:06.567 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:27:06.641 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:06.761 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":15,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0}
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: osdmap e14: 8 total, 0 up, 8 in
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='osd.0 [v2:192.168.123.106:6800/1059922460,v1:192.168.123.106:6801/1059922460]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:06.770 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:06 vm06 ceph-mon[52574]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:06.870 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: osdmap e14: 8 total, 0 up, 8 in
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='osd.0 [v2:192.168.123.106:6800/1059922460,v1:192.168.123.106:6801/1059922460]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:06.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:06 vm01 ceph-mon[46942]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: osdmap e15: 8 total, 0 up, 8 in
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2990831681' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: Deploying daemon osd.4 on vm06
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490]' entity='osd.1'
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:27:07.758 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:07 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:27:07.761 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
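[editor's note] The `osd crush create-or-move` records above all carry "weight":0.0195. CRUSH weights are device capacity expressed in TiB, so 0.0195 corresponds to a ~20 GiB OSD (20/1024 ≈ 0.0195); that is consistent with the pgmap line further down showing "40 GiB / 40 GiB avail" once the first two OSDs are up. A one-line check of the arithmetic:

# CRUSH weight = capacity in TiB; a ~20 GiB device:
print(round(20 / 1024, 4))   # -> 0.0195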
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: osdmap e15: 8 total, 0 up, 8 in
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2990831681' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: Deploying daemon osd.4 on vm06
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490]' entity='osd.1'
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:27:07.834 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:07 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:27:08.156 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:08.562 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:08.658 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":16,"num_osds":8,"num_up_osds":2,"osd_up_since":1772832427,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0}
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: purged_snaps scrub starts
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: purged_snaps scrub ok
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: purged_snaps scrub starts
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: purged_snaps scrub ok
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: pgmap v30: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: osd.0 [v2:192.168.123.106:6800/1059922460,v1:192.168.123.106:6801/1059922460] boot
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490] boot
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: osdmap e16: 8 total, 2 up, 8 in
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
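[editor's note] Both mons relay the same cluster-log/audit stream, which is why every `from=… entity=… cmd=…: dispatch|finished` record above appears once under vm01's journal and once under vm06's. When grepping a run archive it can help to reduce these records to (entity, cmd, outcome) tuples; a hypothetical helper (not part of teuthology; the pattern is inferred from the lines in this log):

import re

AUDIT = re.compile(
    r"entity='(?P<entity>[^']+)' cmd='?(?P<cmd>\[.*?\])'?: (?P<outcome>dispatch|finished)"
)

def parse_audit(line):
    """Return {'entity', 'cmd', 'outcome'} for a mon audit record, else None."""
    m = AUDIT.search(line)
    return m.groupdict() if m else None

# Example against a record copied from this log:
line = ("Mar 06 22:27:05 vm01 ceph-mon[46942]: from='osd.0 ' entity='osd.0' "
        'cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch')
print(parse_audit(line))
# {'entity': 'osd.0', 'cmd': '[{"prefix": "osd crush set-device-class", ...}]', 'outcome': 'dispatch'}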
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: Deploying daemon osd.5 on vm01
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='osd.3 [v2:192.168.123.106:6808/2887797947,v1:192.168.123.106:6809/2887797947]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-06T22:27:08.813 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:08 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3026921947' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: purged_snaps scrub starts
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: purged_snaps scrub ok
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: purged_snaps scrub starts
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: purged_snaps scrub ok
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: pgmap v30: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: osd.0 [v2:192.168.123.106:6800/1059922460,v1:192.168.123.106:6801/1059922460] boot
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: osd.1 [v2:192.168.123.101:6802/2762929490,v1:192.168.123.101:6803/2762929490] boot
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: osdmap e16: 8 total, 2 up, 8 in
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: Deploying daemon osd.5 on vm01
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='osd.3 [v2:192.168.123.106:6808/2887797947,v1:192.168.123.106:6809/2887797947]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-06T22:27:08.851 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:08 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3026921947' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:09.658 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:27:10.085 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:10.111 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-06T22:27:10.111 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: osdmap e17: 8 total, 2 up, 8 in
2026-03-06T22:27:10.111 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:27:10.111 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:27:10.111 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:10.111 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:10.111 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:10.112 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:10.112 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: from='osd.3 [v2:192.168.123.106:6808/2887797947,v1:192.168.123.106:6809/2887797947]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:10.112 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:10.112 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:09 vm06 ceph-mon[52574]: from='osd.2 [v2:192.168.123.101:6810/3040999742,v1:192.168.123.101:6811/3040999742]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: osdmap e17: 8 total, 2 up, 8 in
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: from='osd.3 [v2:192.168.123.106:6808/2887797947,v1:192.168.123.106:6809/2887797947]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:10.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:09 vm01 ceph-mon[46942]: from='osd.2 [v2:192.168.123.101:6810/3040999742,v1:192.168.123.101:6811/3040999742]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-06T22:27:10.586 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:10.673 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":18,"num_osds":8,"num_up_osds":2,"osd_up_since":1772832427,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0}
2026-03-06T22:27:10.917 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: pgmap v33: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail
2026-03-06T22:27:10.917 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-06T22:27:10.917 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='osd.2 [v2:192.168.123.101:6810/3040999742,v1:192.168.123.101:6811/3040999742]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-06T22:27:10.917 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: osdmap e18: 8 total, 2 up, 8 in
2026-03-06T22:27:10.917 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='osd.2 [v2:192.168.123.101:6810/3040999742,v1:192.168.123.101:6811/3040999742]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-06T22:27:10.917 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T22:27:10.917 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-06T22:27:10.918
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/4159514799' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: from='osd.2 [v2:192.168.123.101:6810/3040999742,v1:192.168.123.101:6811/3040999742]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-06T22:27:10.918 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:10 vm06 ceph-mon[52574]: osdmap e19: 8 total, 2 up, 8 in 2026-03-06T22:27:11.120 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: pgmap v33: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='osd.2 [v2:192.168.123.101:6810/3040999742,v1:192.168.123.101:6811/3040999742]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: osdmap e18: 8 total, 2 up, 8 in 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='osd.2 [v2:192.168.123.101:6810/3040999742,v1:192.168.123.101:6811/3040999742]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: 
from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/4159514799' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: from='osd.2 [v2:192.168.123.101:6810/3040999742,v1:192.168.123.101:6811/3040999742]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-06T22:27:11.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:10 vm01 ceph-mon[46942]: osdmap e19: 8 total, 2 up, 8 in 2026-03-06T22:27:11.674 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: purged_snaps scrub starts 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: purged_snaps scrub ok 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: Deploying daemon osd.6 on vm06 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 
22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='osd.3 ' entity='osd.3' 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='osd.4 [v2:192.168.123.106:6816/3680182952,v1:192.168.123.106:6817/3680182952]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:27:12.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:11 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: purged_snaps scrub starts 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: purged_snaps scrub ok 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: Deploying daemon osd.6 on vm06 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 
cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='osd.3 ' entity='osd.3' 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='osd.4 [v2:192.168.123.106:6816/3680182952,v1:192.168.123.106:6817/3680182952]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-06T22:27:12.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:27:12.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:11 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:27:12.167 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:12.541 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:12.610 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":20,"num_osds":8,"num_up_osds":4,"osd_up_since":1772832431,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0} 2026-03-06T22:27:13.086 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: purged_snaps scrub starts 2026-03-06T22:27:13.086 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: purged_snaps scrub ok 2026-03-06T22:27:13.086 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: pgmap v36: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: Deploying daemon osd.7 on vm01 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: osd.2 [v2:192.168.123.101:6810/3040999742,v1:192.168.123.101:6811/3040999742] boot 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: osd.3 [v2:192.168.123.106:6808/2887797947,v1:192.168.123.106:6809/2887797947] boot 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: osdmap e20: 8 total, 4 up, 8 in 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: from='osd.4 [v2:192.168.123.106:6816/3680182952,v1:192.168.123.106:6817/3680182952]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-06T22:27:13.087 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:12 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/333644635' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: purged_snaps scrub starts 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: purged_snaps scrub ok 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: pgmap v36: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: Deploying daemon osd.7 on vm01 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: osd.2 [v2:192.168.123.101:6810/3040999742,v1:192.168.123.101:6811/3040999742] boot 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: osd.3 [v2:192.168.123.106:6808/2887797947,v1:192.168.123.106:6809/2887797947] boot 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: osdmap e20: 8 total, 4 up, 8 in 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: from='osd.4 [v2:192.168.123.106:6816/3680182952,v1:192.168.123.106:6817/3680182952]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch 2026-03-06T22:27:13.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:12 vm06 ceph-mon[52574]: from='client.? 
192.168.123.101:0/333644635' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:27:13.611 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json 2026-03-06T22:27:13.912 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-03-06T22:27:13.912 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: osdmap e21: 8 total, 4 up, 8 in 2026-03-06T22:27:13.912 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T22:27:13.913 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-06T22:27:13.913 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T22:27:13.913 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T22:27:13.913 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T22:27:13.913 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='osd.5 [v2:192.168.123.101:6818/1440378951,v1:192.168.123.101:6819/1440378951]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-06T22:27:13.913 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch 2026-03-06T22:27:13.913 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:27:13.913 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:13.913 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:13.913 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:13 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T22:27:14.084 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 
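The repeated `cephadm ... shell -- ceph osd stat -f json` invocations in the DEBUG lines are the harness polling the cluster until every OSD in the osdmap reports up: the JSON responses show `num_up_osds` climbing 2 -> 4 -> 5 toward `num_osds` == 8 as the deployed daemons boot. A minimal sketch of that wait loop, using a hypothetical local run() helper in place of teuthology's remote-execution API (the command string, image, and fsid are taken verbatim from the log):

    import json
    import subprocess
    import time

    CEPHADM = "sudo /home/ubuntu/cephtest/cephadm"
    IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5"
    FSID = "c76e688a-19a2-11f1-bdea-01160fc6f239"

    def run(cmd):
        # Hypothetical stand-in for teuthology's remote run(); executes the
        # command locally and returns its stdout.
        return subprocess.run(cmd, shell=True, check=True,
                              capture_output=True, text=True).stdout

    def wait_for_osds_up(timeout=600, interval=1):
        # Poll `ceph osd stat -f json` through `cephadm shell` (the exact
        # invocation visible in the DEBUG lines above) until every OSD that
        # exists in the osdmap is also up.
        cmd = (f"{CEPHADM} --image {IMAGE} shell -c /etc/ceph/ceph.conf "
               f"-k /etc/ceph/ceph.client.admin.keyring --fsid {FSID} "
               f"-- ceph osd stat -f json")
        deadline = time.time() + timeout
        while time.time() < deadline:
            stat = json.loads(run(cmd))
            if stat["num_up_osds"] == stat["num_osds"]:
                return stat
            time.sleep(interval)
        raise TimeoutError("not all OSDs came up before the deadline")

The log resumes below with the remaining OSDs (osd.5 through osd.7) booting and the poll finally observing 8 up.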
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: osdmap e21: 8 total, 4 up, 8 in
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='osd.5 [v2:192.168.123.101:6818/1440378951,v1:192.168.123.101:6819/1440378951]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:14.165 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:13 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:14.617 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:14.688 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":22,"num_osds":8,"num_up_osds":5,"osd_up_since":1772832433,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0}
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: purged_snaps scrub starts
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: purged_snaps scrub ok
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: pgmap v39: 0 pgs: ; 0 B data, 505 MiB used, 79 GiB / 80 GiB avail
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='osd.5 [v2:192.168.123.101:6818/1440378951,v1:192.168.123.101:6819/1440378951]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: osd.4 [v2:192.168.123.106:6816/3680182952,v1:192.168.123.106:6817/3680182952] boot
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: osdmap e22: 8 total, 5 up, 8 in
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='osd.5 [v2:192.168.123.101:6818/1440378951,v1:192.168.123.101:6819/1440378951]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1409900878' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='osd.5 [v2:192.168.123.101:6818/1440378951,v1:192.168.123.101:6819/1440378951]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: osdmap e23: 8 total, 5 up, 8 in
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:15.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:14 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: purged_snaps scrub starts
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: purged_snaps scrub ok
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: pgmap v39: 0 pgs: ; 0 B data, 505 MiB used, 79 GiB / 80 GiB avail
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='osd.5 [v2:192.168.123.101:6818/1440378951,v1:192.168.123.101:6819/1440378951]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: osd.4 [v2:192.168.123.106:6816/3680182952,v1:192.168.123.106:6817/3680182952] boot
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: osdmap e22: 8 total, 5 up, 8 in
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='osd.5 [v2:192.168.123.101:6818/1440378951,v1:192.168.123.101:6819/1440378951]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/1409900878' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='osd.5 [v2:192.168.123.101:6818/1440378951,v1:192.168.123.101:6819/1440378951]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: osdmap e23: 8 total, 5 up, 8 in
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:15.342 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:14 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:15.688 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='osd.6 [v2:192.168.123.106:6824/3714035948,v1:192.168.123.106:6825/3714035948]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: osd.5 [v2:192.168.123.101:6818/1440378951,v1:192.168.123.101:6819/1440378951] boot
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: osdmap e24: 8 total, 6 up, 8 in
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='osd.6 [v2:192.168.123.106:6824/3714035948,v1:192.168.123.106:6825/3714035948]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:15.994 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:15 vm01 ceph-mon[46942]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:16.125 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='osd.6 [v2:192.168.123.106:6824/3714035948,v1:192.168.123.106:6825/3714035948]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: osd.5 [v2:192.168.123.101:6818/1440378951,v1:192.168.123.101:6819/1440378951] boot
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: osdmap e24: 8 total, 6 up, 8 in
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='osd.6 [v2:192.168.123.106:6824/3714035948,v1:192.168.123.106:6825/3714035948]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:15 vm06 ceph-mon[52574]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-06T22:27:16.464 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:16.536 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":24,"num_osds":8,"num_up_osds":6,"osd_up_since":1772832435,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":1}
2026-03-06T22:27:17.181 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:16 vm01 ceph-mon[46942]: purged_snaps scrub starts
2026-03-06T22:27:17.181 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:16 vm01 ceph-mon[46942]: purged_snaps scrub ok
2026-03-06T22:27:17.181 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:16 vm01 ceph-mon[46942]: pgmap v42: 1 pgs: 1 unknown; 0 B data, 532 MiB used, 99 GiB / 100 GiB avail
2026-03-06T22:27:17.181 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:16 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1436273775' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:17.181 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:16 vm01 ceph-mon[46942]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-06T22:27:17.181 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:16 vm01 ceph-mon[46942]: osdmap e25: 8 total, 6 up, 8 in
2026-03-06T22:27:17.181 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:16 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:17.181 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:16 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:17.182 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:16 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:17.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:16 vm06 ceph-mon[52574]: purged_snaps scrub starts
2026-03-06T22:27:17.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:16 vm06 ceph-mon[52574]: purged_snaps scrub ok
2026-03-06T22:27:17.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:16 vm06 ceph-mon[52574]: pgmap v42: 1 pgs: 1 unknown; 0 B data, 532 MiB used, 99 GiB / 100 GiB avail
2026-03-06T22:27:17.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:16 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/1436273775' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:17.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:16 vm06 ceph-mon[52574]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-06T22:27:17.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:16 vm06 ceph-mon[52574]: osdmap e25: 8 total, 6 up, 8 in
2026-03-06T22:27:17.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:16 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:17.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:16 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:17.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:16 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:17.537 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:27:17.952 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:18.267 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: purged_snaps scrub starts
2026-03-06T22:27:18.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: purged_snaps scrub ok
2026-03-06T22:27:18.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: pgmap v45: 1 pgs: 1 unknown; 0 B data, 558 MiB used, 119 GiB / 120 GiB avail
2026-03-06T22:27:18.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:18.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:18.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: from='osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch
2026-03-06T22:27:18.268 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:27:18.374 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:18.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: purged_snaps scrub starts
2026-03-06T22:27:18.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: purged_snaps scrub ok
2026-03-06T22:27:18.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: pgmap v45: 1 pgs: 1 unknown; 0 B data, 558 MiB used, 119 GiB / 120 GiB avail
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: from='osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: from='osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: osdmap e26: 8 total, 6 up, 8 in
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: from='osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: from='osd.6 ' entity='osd.6'
2026-03-06T22:27:18.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:18 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:18.474 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":26,"num_osds":8,"num_up_osds":6,"osd_up_since":1772832435,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":1}
2026-03-06T22:27:18.521 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: from='osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished
2026-03-06T22:27:18.521 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: osdmap e26: 8 total, 6 up, 8 in
2026-03-06T22:27:18.521 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: from='osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch
2026-03-06T22:27:18.521 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:18.521 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:18.521 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: from='osd.6 ' entity='osd.6'
2026-03-06T22:27:18.521 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:18 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:19.474 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd stat -f json
2026-03-06T22:27:19.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:19 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/777714554' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T22:27:19.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:19.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:19.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:19 vm01 ceph-mon[46942]: from='osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-03-06T22:27:19.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:19 vm01 ceph-mon[46942]: osd.6 [v2:192.168.123.106:6824/3714035948,v1:192.168.123.106:6825/3714035948] boot
2026-03-06T22:27:19.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:19 vm01 ceph-mon[46942]: osdmap e27: 8 total, 7 up, 8 in
2026-03-06T22:27:19.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T22:27:19.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T22:27:19.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:19.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:19 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:19.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:19 vm06 ceph-mon[52574]: from='client.?
192.168.123.101:0/777714554' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:27:19.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:19 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:19.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:19 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:19.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:19 vm06 ceph-mon[52574]: from='osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-06T22:27:19.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:19 vm06 ceph-mon[52574]: osd.6 [v2:192.168.123.106:6824/3714035948,v1:192.168.123.106:6825/3714035948] boot 2026-03-06T22:27:19.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:19 vm06 ceph-mon[52574]: osdmap e27: 8 total, 7 up, 8 in 2026-03-06T22:27:19.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:19 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T22:27:19.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:19 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T22:27:19.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:19 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:19.655 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:19 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:19.984 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:20.380 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:20.476 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":28,"num_osds":8,"num_up_osds":8,"osd_up_since":1772832439,"num_in_osds":8,"osd_in_since":1772832416,"num_remapped_pgs":0} 2026-03-06T22:27:20.476 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd dump --format=json 2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: purged_snaps scrub starts 2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: purged_snaps scrub ok 2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: pgmap v48: 1 pgs: 1 unknown; 0 B data, 185 MiB used, 140 GiB / 140 GiB avail 2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: from='osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947]' entity='osd.7' 2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947] boot 2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: osdmap e28: 8 total, 8 up, 8 in 
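
The repeated "cephadm ... shell ... -- ceph osd stat -f json" probes above, which stop once osdmap e28 reports "8 total, 8 up, 8 in", amount to a poll-until-all-OSDs-up loop before the run proceeds to the osd dump and client setup. The sketch below is a hypothetical Python reconstruction of that loop, not teuthology's actual code: the cephadm path, container image, fsid, and JSON field names (num_osds, num_up_osds) are copied from the log, while the function names, timeout, and poll interval are assumptions for illustration.

#!/usr/bin/env python3
# Hypothetical sketch of the wait loop visible in the log above;
# not the real teuthology implementation.
import json
import subprocess
import time

# Values copied verbatim from the surrounding log lines.
CEPHADM = "/home/ubuntu/cephtest/cephadm"
IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5"
FSID = "c76e688a-19a2-11f1-bdea-01160fc6f239"

def osd_stat():
    """Run `ceph osd stat -f json` inside a cephadm shell and parse the JSON."""
    out = subprocess.check_output([
        "sudo", CEPHADM, "--image", IMAGE,
        "shell", "--fsid", FSID, "--",
        "ceph", "osd", "stat", "-f", "json",
    ])
    return json.loads(out)

def wait_for_all_osds_up(timeout=300, interval=1):
    """Poll until num_up_osds == num_osds (e.g. the e28 state: 8 up, 8 in)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        stat = osd_stat()
        if stat["num_osds"] > 0 and stat["num_up_osds"] == stat["num_osds"]:
            return stat  # e.g. {"epoch": 28, "num_osds": 8, "num_up_osds": 8, ...}
        time.sleep(interval)
    raise TimeoutError("not all OSDs came up within %ds" % timeout)
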
2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3365661093' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:20.823 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:20 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-06T22:27:20.848 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: purged_snaps scrub starts 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: purged_snaps scrub ok 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: pgmap v48: 1 pgs: 1 unknown; 0 B data, 185 MiB used, 140 GiB / 140 GiB avail 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: from='osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947]' entity='osd.7' 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: osd.7 [v2:192.168.123.101:6826/297143947,v1:192.168.123.101:6827/297143947] boot 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: osdmap e28: 8 total, 8 up, 8 in 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: from='client.? 
192.168.123.101:0/3365661093' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:21.095 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:20 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch 2026-03-06T22:27:21.317 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:21.317 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":29,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","created":"2026-03-06T21:24:40.690850+0000","modified":"2026-03-06T21:27:20.844055+0000","last_up_change":"2026-03-06T21:27:19.841419+0000","last_in_change":"2026-03-06T21:26:56.266357+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":15,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-06T21:27:13.277242+0000","flags":32769,"flags_names":"hashpspool,creating","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"23","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"a7899fe4-5f7a-4604-8760-1e2f968c5695","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6801","nonce":1059922460}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6803","nonce":1059922460}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6807","nonce":1059922460}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6805","nonce":1059922460}]},"public_addr":"192.168.123.106:6801/1059922460","cluster_addr":"192.168.123.106:6803/1059922460","heartbeat_back_addr":"192.168.123.106:6807/1059922460","heartbeat_front_addr":"192.168.123.106:6805/1059922460","state":["exists","up"]},{"osd":1,"uuid":"5d21bba8-8bc4-4ec1-b646-fc50b522c81d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6803","nonce":2762929490}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6805","nonce":2762929490}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6809","nonce":2762929490}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6807","nonce":2762929490}]},"public_addr":"192.168.123.101:6803/2762929490","cluster_addr":"192.168.123.101:6805/2762929490","heartbeat_back_addr":"192.168.123.101:6809/2762929490","heartbeat_front_addr":"192.168.123.101:6807/2762929490","state":["exists","up"]},{"osd":2,"uuid":"d5edc145-cbe2-4519-bb3f-fb428e3e820f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6811","nonce":3040999742}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6813","nonce":3040999742}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6817","nonce":3040999742}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6815","nonce":3040999742}]},"public_addr":"192.168.123.101:6811/3040999742","cluster_addr":"192.168.123.101:6813/3040999742","heartbeat_back_addr":"192.168.123.101:6817/3040999742","heartbeat_front_addr":"192.168.123.101:6815/3040999742","state":["exists","up"]},{"osd":3,"uuid":"3ad582a8-03b4-42cf-844c-b79d2701f629","up":1,"in":1,"weight":1,"primary_affinity"
:1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6809","nonce":2887797947}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6811","nonce":2887797947}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6815","nonce":2887797947}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6813","nonce":2887797947}]},"public_addr":"192.168.123.106:6809/2887797947","cluster_addr":"192.168.123.106:6811/2887797947","heartbeat_back_addr":"192.168.123.106:6815/2887797947","heartbeat_front_addr":"192.168.123.106:6813/2887797947","state":["exists","up"]},{"osd":4,"uuid":"b83f2bbe-cfb7-4b75-ac34-55fca55be32a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6817","nonce":3680182952}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6819","nonce":3680182952}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6823","nonce":3680182952}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6821","nonce":3680182952}]},"public_addr":"192.168.123.106:6817/3680182952","cluster_addr":"192.168.123.106:6819/3680182952","heartbeat_back_addr":"192.168.123.106:6823/3680182952","heartbeat_front_addr":"192.168.123.106:6821/3680182952","state":["exists","up"]},{"osd":5,"uuid":"507447ec-8f5a-48a8-8611-bf4a66e8bbc1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":27,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6819","nonce":1440378951}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6821","nonce":1440378951}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6825","nonce":1440378951}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6823","nonce":1440378951}]},"public_addr":"192.168.123.101:6819/1440378951","cluster_addr":"192.168.123.101:6821/1440378951","heartbeat_back_addr":"192.168.123.101:6825/1440378951","heartbeat_front_addr":"192.168.123.101:6823/1440378951","state":["exists","up"]},{"osd":6,"uuid":"91b15bb3-09a4-485e-92be-1d84ac40349c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":3714035948},{"type":"v1","addr":"192.168.123.106:6825","nonce":3714035948}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":3714035948},{"type":"v1","addr":"192.
168.123.106:6827","nonce":3714035948}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":3714035948},{"type":"v1","addr":"192.168.123.106:6831","nonce":3714035948}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":3714035948},{"type":"v1","addr":"192.168.123.106:6829","nonce":3714035948}]},"public_addr":"192.168.123.106:6825/3714035948","cluster_addr":"192.168.123.106:6827/3714035948","heartbeat_back_addr":"192.168.123.106:6831/3714035948","heartbeat_front_addr":"192.168.123.106:6829/3714035948","state":["exists","up"]},{"osd":7,"uuid":"0bc4acec-8f09-4d2b-9bf6-2fe0cd00c69c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6827","nonce":297143947}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6829","nonce":297143947}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6832","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6833","nonce":297143947}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6831","nonce":297143947}]},"public_addr":"192.168.123.101:6827/297143947","cluster_addr":"192.168.123.101:6829/297143947","heartbeat_back_addr":"192.168.123.101:6833/297143947","heartbeat_front_addr":"192.168.123.101:6831/297143947","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:06.157777+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:06.487693+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:10.435760+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:09.504310+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:12.783211+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:13.980373+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:15.956329+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:18.335858+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/2226875238":"2026-03-07T21:26:13.213414+0000","192.168.123.101:0/813203294":"2026-03-07T21:26:13.213414+0000","192.168.123.101:0/3853036737":"2026-03-07T21:25:31.761348+0000","192.168.123.101:0/2217
362490":"2026-03-07T21:25:09.037216+0000","192.168.123.101:0/3791407434":"2026-03-07T21:25:09.037216+0000","192.168.123.101:6800/100672136":"2026-03-07T21:25:09.037216+0000","192.168.123.101:0/1533654466":"2026-03-07T21:25:31.761348+0000","192.168.123.101:6801/100672136":"2026-03-07T21:25:09.037216+0000","192.168.123.101:0/3777790225":"2026-03-07T21:25:09.037216+0000","192.168.123.101:6800/1153325880":"2026-03-07T21:26:13.213414+0000","192.168.123.101:6801/1153325880":"2026-03-07T21:26:13.213414+0000","192.168.123.101:0/781181018":"2026-03-07T21:25:31.761348+0000","192.168.123.101:6800/3028204436":"2026-03-07T21:25:31.761348+0000","192.168.123.101:6801/3028204436":"2026-03-07T21:25:31.761348+0000","192.168.123.101:0/4042132768":"2026-03-07T21:26:13.213414+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-06T22:27:21.329 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 sudo[76191]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-03-06T22:27:21.329 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 sudo[76191]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-06T22:27:21.329 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 sudo[76191]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-06T22:27:21.329 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 sudo[76191]: pam_unix(sudo:session): session closed for user root 2026-03-06T22:27:21.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 sudo[67570]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-03-06T22:27:21.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 sudo[67570]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-06T22:27:21.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 sudo[67570]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-06T22:27:21.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 sudo[67570]: pam_unix(sudo:session): session closed for user root 2026-03-06T22:27:21.865 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-06T21:27:13.277242+0000', 'flags': 32769, 'flags_names': 'hashpspool,creating', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '23', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': 
'[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 7.889999866485596, 'score_stable': 7.889999866485596, 'optimal_score': 0.3799999952316284, 'raw_score_acting': 3, 'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-03-06T22:27:21.865 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd pool get .mgr pg_num 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: Detected new or changed devices on vm06 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: osdmap e29: 8 total, 8 up, 8 in 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm01"}]: dispatch 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm01"}]: dispatch 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/4119163566' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:27:21.886 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:21 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:27:22.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: Detected new or changed devices on vm06 2026-03-06T22:27:22.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: osdmap e29: 8 total, 8 up, 8 in 2026-03-06T22:27:22.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm01"}]: dispatch 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm01"}]: dispatch 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mon metadata", "id": "vm06"}]: dispatch 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='client.? 
192.168.123.101:0/4119163566' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:27:22.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:21 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:27:22.197 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:22.522 INFO:teuthology.orchestra.run.vm01.stdout:pg_num: 1 2026-03-06T22:27:22.574 INFO:tasks.cephadm:Setting up client nodes... 2026-03-06T22:27:22.574 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-06T22:27:22.938 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:23.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:22 vm01 ceph-mon[46942]: pgmap v51: 1 pgs: 1 unknown; 0 B data, 459 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:23.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:22 vm01 ceph-mon[46942]: Detected new or changed devices on vm01 2026-03-06T22:27:23.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:22 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:23.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:22 vm01 ceph-mon[46942]: mgrmap e19: vm01.mrlynj(active, since 68s), standbys: vm06.awlziz 2026-03-06T22:27:23.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:22 vm01 ceph-mon[46942]: osdmap e30: 8 total, 8 up, 8 in 2026-03-06T22:27:23.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:22 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:27:23.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:22 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/879370624' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-06T22:27:23.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:22 vm06 ceph-mon[52574]: pgmap v51: 1 pgs: 1 unknown; 0 B data, 459 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:23.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:22 vm06 ceph-mon[52574]: Detected new or changed devices on vm01 2026-03-06T22:27:23.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:22 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:23.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:22 vm06 ceph-mon[52574]: mgrmap e19: vm01.mrlynj(active, since 68s), standbys: vm06.awlziz 2026-03-06T22:27:23.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:22 vm06 ceph-mon[52574]: osdmap e30: 8 total, 8 up, 8 in 2026-03-06T22:27:23.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:22 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:27:23.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:22 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/879370624' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-06T22:27:23.298 INFO:teuthology.orchestra.run.vm01.stdout:[client.0] 2026-03-06T22:27:23.298 INFO:teuthology.orchestra.run.vm01.stdout: key = AQC7RqtpE+NSERAAU+5Pu9lCNFtP2aLwvYWukA== 2026-03-06T22:27:23.371 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-06T22:27:23.371 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-03-06T22:27:23.371 DEBUG:teuthology.orchestra.run.vm01:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-03-06T22:27:23.404 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-06T22:27:23.731 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm06/config 2026-03-06T22:27:24.108 INFO:teuthology.orchestra.run.vm06.stdout:[client.1] 2026-03-06T22:27:24.108 INFO:teuthology.orchestra.run.vm06.stdout: key = AQC8Rqtpg5TsBRAA5CUFo342mlU3ylqhuyDuYw== 2026-03-06T22:27:24.108 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:23 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3818993023' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-06T22:27:24.108 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:23 vm06 ceph-mon[52574]: from='client.? 
192.168.123.101:0/3818993023' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-06T22:27:24.172 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-06T22:27:24.172 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.client.1.keyring 2026-03-06T22:27:24.172 DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring 2026-03-06T22:27:24.211 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 2026-03-06T22:27:24.212 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-06T22:27:24.212 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph mgr dump --format=json 2026-03-06T22:27:24.237 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:23 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3818993023' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-06T22:27:24.237 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:23 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3818993023' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-06T22:27:24.542 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:24.900 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:25.136 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:24 vm01 ceph-mon[46942]: pgmap v53: 1 pgs: 1 unknown; 0 B data, 460 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:25.136 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:24 vm01 ceph-mon[46942]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-06T22:27:25.137 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:24 vm01 ceph-mon[46942]: from='client.? 192.168.123.106:0/663868293' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-06T22:27:25.137 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:24 vm01 ceph-mon[46942]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-06T22:27:25.138 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":19,"flags":0,"active_gid":14221,"active_name":"vm01.mrlynj","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6800","nonce":2327267800},{"type":"v1","addr":"192.168.123.101:6801","nonce":2327267800}]},"active_addr":"192.168.123.101:6801/2327267800","active_change":"2026-03-06T21:26:13.213521+0000","active_mgr_features":4540701547738038271,"available":true,"standbys":[{"gid":14254,"name":"vm06.awlziz","mgr_features":4540701547738038271,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate 
as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health 
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = 
Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this option can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger 
collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.101:8443/","prometheus":"http://192.168.123.101:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":5,"active_clients":[{"name":"devicehealth","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":3568160580}]},{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":3311011304}]},{"na
me":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":837672905}]},{"name":"volumes","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":1806932060}]}]} 2026-03-06T22:27:25.140 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-06T22:27:25.140 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-06T22:27:25.140 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd dump --format=json 2026-03-06T22:27:25.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:24 vm06 ceph-mon[52574]: pgmap v53: 1 pgs: 1 unknown; 0 B data, 460 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:25.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:24 vm06 ceph-mon[52574]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-06T22:27:25.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:24 vm06 ceph-mon[52574]: from='client.? 192.168.123.106:0/663868293' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-06T22:27:25.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:24 vm06 ceph-mon[52574]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-06T22:27:25.489 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:25.955 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:25.955 
INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":30,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","created":"2026-03-06T21:24:40.690850+0000","modified":"2026-03-06T21:27:21.848594+0000","last_up_change":"2026-03-06T21:27:19.841419+0000","last_in_change":"2026-03-06T21:26:56.266357+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":15,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-06T21:27:13.277242+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"30","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"a7899fe4-5f7a-4604-8760-1e2f968c5695","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6801","nonce":1059922460}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6803","nonce":1059922460}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6807","nonce":1059922460}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6805","nonce":1059922460}]},"public_addr":"192.168.123.106:6801/1059922460","cluster_addr":"192.168.123.106:6803/1059922460","heartbeat_back_addr":"192.168.123.106:6807/1059922460","heartbeat_front_addr":"192.168.123.106:6805/1059922460","state":["exists","up"]},{"osd":1,"uuid":"5d21bba8-8bc4-4ec1-b646-fc50b522c81d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6803","nonce":2762929490}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6805","nonce":2762929490}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6809","nonce":2762929490}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6807","nonce":2762929490}]},"public_addr":"192.168.123.101:6803/2762929490","cluster_addr":"192.168.123.101:6805/2762929490","heartbeat_back_addr":"192.168.123.101:6809/2762929490","heartbeat_front_addr":"192.168.123.101:6807/2762929490","state":["exists","up"]},{"osd":2,"uuid":"d5edc145-cbe2-4519-bb3f-fb428e3e820f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6811","nonce":3040999742}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6813","nonce":3040999742}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6817","nonce":3040999742}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6815","nonce":3040999742}]},"public_addr":"192.168.123.101:6811/3040999742","cluster_addr":"192.168.123.101:6813/3040999742","heartbeat_back_addr":"192.168.123.101:6817/3040999742","heartbeat_front_addr":"192.168.123.101:6815/3040999742","state":["exists","up"]},{"osd":3,"uuid":"3ad582a8-03b4-42cf-844c-b79d2701f629","up":1,"in":1,"weight":1,"primary_affinity"
:1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6809","nonce":2887797947}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6811","nonce":2887797947}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6815","nonce":2887797947}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6813","nonce":2887797947}]},"public_addr":"192.168.123.106:6809/2887797947","cluster_addr":"192.168.123.106:6811/2887797947","heartbeat_back_addr":"192.168.123.106:6815/2887797947","heartbeat_front_addr":"192.168.123.106:6813/2887797947","state":["exists","up"]},{"osd":4,"uuid":"b83f2bbe-cfb7-4b75-ac34-55fca55be32a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6817","nonce":3680182952}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6819","nonce":3680182952}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6823","nonce":3680182952}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6821","nonce":3680182952}]},"public_addr":"192.168.123.106:6817/3680182952","cluster_addr":"192.168.123.106:6819/3680182952","heartbeat_back_addr":"192.168.123.106:6823/3680182952","heartbeat_front_addr":"192.168.123.106:6821/3680182952","state":["exists","up"]},{"osd":5,"uuid":"507447ec-8f5a-48a8-8611-bf4a66e8bbc1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":27,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6819","nonce":1440378951}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6821","nonce":1440378951}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6825","nonce":1440378951}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6823","nonce":1440378951}]},"public_addr":"192.168.123.101:6819/1440378951","cluster_addr":"192.168.123.101:6821/1440378951","heartbeat_back_addr":"192.168.123.101:6825/1440378951","heartbeat_front_addr":"192.168.123.101:6823/1440378951","state":["exists","up"]},{"osd":6,"uuid":"91b15bb3-09a4-485e-92be-1d84ac40349c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":3714035948},{"type":"v1","addr":"192.168.123.106:6825","nonce":3714035948}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":3714035948},{"type":"v1","addr":"192.
168.123.106:6827","nonce":3714035948}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":3714035948},{"type":"v1","addr":"192.168.123.106:6831","nonce":3714035948}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":3714035948},{"type":"v1","addr":"192.168.123.106:6829","nonce":3714035948}]},"public_addr":"192.168.123.106:6825/3714035948","cluster_addr":"192.168.123.106:6827/3714035948","heartbeat_back_addr":"192.168.123.106:6831/3714035948","heartbeat_front_addr":"192.168.123.106:6829/3714035948","state":["exists","up"]},{"osd":7,"uuid":"0bc4acec-8f09-4d2b-9bf6-2fe0cd00c69c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6827","nonce":297143947}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6829","nonce":297143947}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6832","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6833","nonce":297143947}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6831","nonce":297143947}]},"public_addr":"192.168.123.101:6827/297143947","cluster_addr":"192.168.123.101:6829/297143947","heartbeat_back_addr":"192.168.123.101:6833/297143947","heartbeat_front_addr":"192.168.123.101:6831/297143947","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:06.157777+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:06.487693+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:10.435760+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:09.504310+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:12.783211+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:13.980373+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:15.956329+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:18.335858+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/2226875238":"2026-03-07T21:26:13.213414+0000","192.168.123.101:0/813203294":"2026-03-07T21:26:13.213414+0000","192.168.123.101:0/3853036737":"2026-03-07T21:25:31.761348+0000","192.168.123.101:0/2217
362490":"2026-03-07T21:25:09.037216+0000","192.168.123.101:0/3791407434":"2026-03-07T21:25:09.037216+0000","192.168.123.101:6800/100672136":"2026-03-07T21:25:09.037216+0000","192.168.123.101:0/1533654466":"2026-03-07T21:25:31.761348+0000","192.168.123.101:6801/100672136":"2026-03-07T21:25:09.037216+0000","192.168.123.101:0/3777790225":"2026-03-07T21:25:09.037216+0000","192.168.123.101:6800/1153325880":"2026-03-07T21:26:13.213414+0000","192.168.123.101:6801/1153325880":"2026-03-07T21:26:13.213414+0000","192.168.123.101:0/781181018":"2026-03-07T21:25:31.761348+0000","192.168.123.101:6800/3028204436":"2026-03-07T21:25:31.761348+0000","192.168.123.101:6801/3028204436":"2026-03-07T21:25:31.761348+0000","192.168.123.101:0/4042132768":"2026-03-07T21:26:13.213414+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-06T22:27:26.008 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-03-06T22:27:26.009 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd dump --format=json 2026-03-06T22:27:26.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:25 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3761893559' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-06T22:27:26.326 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:26.349 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:25 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/3761893559' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-06T22:27:26.636 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:26.636 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":30,"fsid":"c76e688a-19a2-11f1-bdea-01160fc6f239","created":"2026-03-06T21:24:40.690850+0000","modified":"2026-03-06T21:27:21.848594+0000","last_up_change":"2026-03-06T21:27:19.841419+0000","last_in_change":"2026-03-06T21:26:56.266357+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":15,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-06T21:27:13.277242+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"30","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"a7899fe4-5f7a-4604-8760-1e2f968c5695","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6801","nonce":1059922460}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6803","nonce":1059922460}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6807","nonce":1059922460}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1059922460},{"type":"v1","addr":"192.168.123.106:6805","nonce":1059922460}]},"public_addr":"192.168.123.106:6801/1059922460","cluster_addr":"192.168.123.106:6803/1059922460","heartbeat_back_addr":"192.168.123.106:6807/1059922460","heartbeat_front_addr":"192.168.123.106:6805/1059922460","state":["exists","up"]},{"osd":1,"uuid":"5d21bba8-8bc4-4ec1-b646-fc50b522c81d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6803","nonce":2762929490}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6805","nonce":2762929490}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6809","nonce":2762929490}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":2762929490},{"type":"v1","addr":"192.168.123.101:6807","nonce":2762929490}]},"public_addr":"192.168.123.101:6803/2762929490","cluster_addr":"192.168.123.101:6805/2762929490","heartbeat_back_addr":"192.168.123.101:6809/2762929490","heartbeat_front_addr":"192.168.123.101:6807/2762929490","state":["exists","up"]},{"osd":2,"uuid":"d5edc145-cbe2-4519-bb3f-fb428e3e820f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6811","nonce":3040999742}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6813","nonce":3040999742}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6817","nonce":3040999742}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":3040999742},{"type":"v1","addr":"192.168.123.101:6815","nonce":3040999742}]},"public_addr":"192.168.123.101:6811/3040999742","cluster_addr":"192.168.123.101:6813/3040999742","heartbeat_back_addr":"192.168.123.101:6817/3040999742","heartbeat_front_addr":"192.168.123.101:6815/3040999742","state":["exists","up"]},{"osd":3,"uuid":"3ad582a8-03b4-42cf-844c-b79d2701f629","up":1,"in":1,"weight":1,"primary_affinity"
:1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6809","nonce":2887797947}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6811","nonce":2887797947}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6815","nonce":2887797947}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":2887797947},{"type":"v1","addr":"192.168.123.106:6813","nonce":2887797947}]},"public_addr":"192.168.123.106:6809/2887797947","cluster_addr":"192.168.123.106:6811/2887797947","heartbeat_back_addr":"192.168.123.106:6815/2887797947","heartbeat_front_addr":"192.168.123.106:6813/2887797947","state":["exists","up"]},{"osd":4,"uuid":"b83f2bbe-cfb7-4b75-ac34-55fca55be32a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6817","nonce":3680182952}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6819","nonce":3680182952}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6823","nonce":3680182952}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":3680182952},{"type":"v1","addr":"192.168.123.106:6821","nonce":3680182952}]},"public_addr":"192.168.123.106:6817/3680182952","cluster_addr":"192.168.123.106:6819/3680182952","heartbeat_back_addr":"192.168.123.106:6823/3680182952","heartbeat_front_addr":"192.168.123.106:6821/3680182952","state":["exists","up"]},{"osd":5,"uuid":"507447ec-8f5a-48a8-8611-bf4a66e8bbc1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":27,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6819","nonce":1440378951}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6821","nonce":1440378951}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6825","nonce":1440378951}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":1440378951},{"type":"v1","addr":"192.168.123.101:6823","nonce":1440378951}]},"public_addr":"192.168.123.101:6819/1440378951","cluster_addr":"192.168.123.101:6821/1440378951","heartbeat_back_addr":"192.168.123.101:6825/1440378951","heartbeat_front_addr":"192.168.123.101:6823/1440378951","state":["exists","up"]},{"osd":6,"uuid":"91b15bb3-09a4-485e-92be-1d84ac40349c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":3714035948},{"type":"v1","addr":"192.168.123.106:6825","nonce":3714035948}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":3714035948},{"type":"v1","addr":"192.
168.123.106:6827","nonce":3714035948}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":3714035948},{"type":"v1","addr":"192.168.123.106:6831","nonce":3714035948}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":3714035948},{"type":"v1","addr":"192.168.123.106:6829","nonce":3714035948}]},"public_addr":"192.168.123.106:6825/3714035948","cluster_addr":"192.168.123.106:6827/3714035948","heartbeat_back_addr":"192.168.123.106:6831/3714035948","heartbeat_front_addr":"192.168.123.106:6829/3714035948","state":["exists","up"]},{"osd":7,"uuid":"0bc4acec-8f09-4d2b-9bf6-2fe0cd00c69c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6827","nonce":297143947}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6829","nonce":297143947}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6832","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6833","nonce":297143947}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":297143947},{"type":"v1","addr":"192.168.123.101:6831","nonce":297143947}]},"public_addr":"192.168.123.101:6827/297143947","cluster_addr":"192.168.123.101:6829/297143947","heartbeat_back_addr":"192.168.123.101:6833/297143947","heartbeat_front_addr":"192.168.123.101:6831/297143947","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:06.157777+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:06.487693+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:10.435760+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:09.504310+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:12.783211+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:13.980373+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:15.956329+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T21:27:18.335858+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/2226875238":"2026-03-07T21:26:13.213414+0000","192.168.123.101:0/813203294":"2026-03-07T21:26:13.213414+0000","192.168.123.101:0/3853036737":"2026-03-07T21:25:31.761348+0000","192.168.123.101:0/2217
362490":"2026-03-07T21:25:09.037216+0000","192.168.123.101:0/3791407434":"2026-03-07T21:25:09.037216+0000","192.168.123.101:6800/100672136":"2026-03-07T21:25:09.037216+0000","192.168.123.101:0/1533654466":"2026-03-07T21:25:31.761348+0000","192.168.123.101:6801/100672136":"2026-03-07T21:25:09.037216+0000","192.168.123.101:0/3777790225":"2026-03-07T21:25:09.037216+0000","192.168.123.101:6800/1153325880":"2026-03-07T21:26:13.213414+0000","192.168.123.101:6801/1153325880":"2026-03-07T21:26:13.213414+0000","192.168.123.101:0/781181018":"2026-03-07T21:25:31.761348+0000","192.168.123.101:6800/3028204436":"2026-03-07T21:25:31.761348+0000","192.168.123.101:6801/3028204436":"2026-03-07T21:25:31.761348+0000","192.168.123.101:0/4042132768":"2026-03-07T21:26:13.213414+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-06T22:27:26.691 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph tell osd.0 flush_pg_stats 2026-03-06T22:27:26.691 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph tell osd.1 flush_pg_stats 2026-03-06T22:27:26.691 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph tell osd.2 flush_pg_stats 2026-03-06T22:27:26.691 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph tell osd.3 flush_pg_stats 2026-03-06T22:27:26.691 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph tell osd.4 flush_pg_stats 2026-03-06T22:27:26.692 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph tell osd.5 flush_pg_stats 2026-03-06T22:27:26.692 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph tell osd.6 flush_pg_stats 2026-03-06T22:27:26.692 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph tell osd.7 flush_pg_stats 2026-03-06T22:27:26.895 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:26 vm01 ceph-mon[46942]: pgmap v54: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:26.896 
2026-03-06T22:27:26.896 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:26 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/2780039737' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-06T22:27:26.896 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:26 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3824326339' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-06T22:27:27.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:26 vm06 ceph-mon[52574]: pgmap v54: 1 pgs: 1 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail
2026-03-06T22:27:27.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:26 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/2780039737' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-06T22:27:27.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:26 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3824326339' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-06T22:27:27.957 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:28.006 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:28.156 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:28.341 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:28.343 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:28.350 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:28.506 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:28.515 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:28.953 INFO:teuthology.orchestra.run.vm01.stdout:85899345925
2026-03-06T22:27:28.953 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd last-stat-seq osd.2
2026-03-06T22:27:28.969 INFO:teuthology.orchestra.run.vm01.stdout:94489280516
2026-03-06T22:27:28.969 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd last-stat-seq osd.4
2026-03-06T22:27:29.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:28 vm01 ceph-mon[46942]: pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-03-06T22:27:29.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:28 vm01 ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T22:27:29.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:28 vm06 ceph-mon[52574]: pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-03-06T22:27:29.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:28 vm06 ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T22:27:29.213 INFO:teuthology.orchestra.run.vm01.stdout:68719476742
2026-03-06T22:27:29.213 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd last-stat-seq osd.1
2026-03-06T22:27:29.507 INFO:teuthology.orchestra.run.vm01.stdout:103079215108
2026-03-06T22:27:29.508 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd last-stat-seq osd.5
2026-03-06T22:27:29.956 INFO:teuthology.orchestra.run.vm01.stdout:68719476742
2026-03-06T22:27:29.956 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd last-stat-seq osd.0
2026-03-06T22:27:29.972 INFO:teuthology.orchestra.run.vm01.stdout:85899345925
2026-03-06T22:27:29.972 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd last-stat-seq osd.3
2026-03-06T22:27:29.993 INFO:teuthology.orchestra.run.vm01.stdout:120259084291
2026-03-06T22:27:29.993 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd last-stat-seq osd.7
2026-03-06T22:27:30.004 INFO:teuthology.orchestra.run.vm01.stdout:115964116996
2026-03-06T22:27:30.004 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph osd last-stat-seq osd.6
2026-03-06T22:27:30.165 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:30.722 INFO:teuthology.orchestra.run.vm01.stdout:85899345925
2026-03-06T22:27:30.823 INFO:tasks.cephadm.ceph_manager.ceph:need seq 85899345925 got 85899345925 for osd.2
2026-03-06T22:27:30.823 DEBUG:teuthology.parallel:result is None
2026-03-06T22:27:30.845 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:30.973 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:30 vm01 ceph-mon[46942]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-03-06T22:27:30.974 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:30 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1896578764' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch
2026-03-06T22:27:31.013 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:31.271 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:31.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:30 vm06 ceph-mon[52574]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-03-06T22:27:31.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:30 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/1896578764' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch
2026-03-06T22:27:31.601 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:31.807 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:31.940 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:31.959 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:31.976 INFO:teuthology.orchestra.run.vm01.stdout:94489280517
2026-03-06T22:27:32.139 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280516 got 94489280517 for osd.4
2026-03-06T22:27:32.139 DEBUG:teuthology.parallel:result is None
2026-03-06T22:27:32.159 INFO:teuthology.orchestra.run.vm01.stdout:68719476742
2026-03-06T22:27:32.308 INFO:teuthology.orchestra.run.vm01.stdout:115964116996
2026-03-06T22:27:32.351 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476742 got 68719476742 for osd.1
2026-03-06T22:27:32.351 DEBUG:teuthology.parallel:result is None
2026-03-06T22:27:32.497 INFO:tasks.cephadm.ceph_manager.ceph:need seq 115964116996 got 115964116996 for osd.6
2026-03-06T22:27:32.498 DEBUG:teuthology.parallel:result is None
2026-03-06T22:27:32.654 INFO:teuthology.orchestra.run.vm01.stdout:103079215109
2026-03-06T22:27:32.727 INFO:teuthology.orchestra.run.vm01.stdout:120259084292
2026-03-06T22:27:32.744 INFO:teuthology.orchestra.run.vm01.stdout:68719476742
2026-03-06T22:27:32.813 INFO:teuthology.orchestra.run.vm01.stdout:85899345925
2026-03-06T22:27:32.831 INFO:tasks.cephadm.ceph_manager.ceph:need seq 120259084291 got 120259084292 for osd.7
2026-03-06T22:27:32.831 DEBUG:teuthology.parallel:result is None
2026-03-06T22:27:32.844 INFO:tasks.cephadm.ceph_manager.ceph:need seq 103079215108 got 103079215109 for osd.5
2026-03-06T22:27:32.844 DEBUG:teuthology.parallel:result is None
2026-03-06T22:27:32.901 INFO:tasks.cephadm.ceph_manager.ceph:need seq 85899345925 got 85899345925 for osd.3
2026-03-06T22:27:32.902 DEBUG:teuthology.parallel:result is None
2026-03-06T22:27:32.902 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476742 got 68719476742 for osd.0
2026-03-06T22:27:32.902 DEBUG:teuthology.parallel:result is None
2026-03-06T22:27:32.902 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean
2026-03-06T22:27:32.902 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph pg dump --format=json
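The "waiting for clean" step just logged works by dumping the PG map as JSON and checking the state of every PG in `pg_map.pg_stats`; in the dump that follows, the cluster's single PG 1.0 already reports active+clean. A simplified sketch of that check, reusing the ceph_shell wrapper from the sketch above (the real harness is more lenient, accepting any state that contains active+clean, and bounds the wait with a timeout):

    import json
    import time

    def all_pgs_clean():
        # "dumped all" goes to stderr; stdout is the JSON document (with a
        # leading blank line, which json.loads tolerates).
        out = ceph_shell("ceph", "pg", "dump", "--format=json")
        pg_stats = json.loads(out)["pg_map"]["pg_stats"]
        return all("active+clean" in pg["state"] for pg in pg_stats)

    while not all_pgs_clean():  # simplified: the harness also bounds this wait
        time.sleep(1)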
2026-03-06T22:27:32.912 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:32 vm01 ceph-mon[46942]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-03-06T22:27:32.912 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:32 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3152122952' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch
2026-03-06T22:27:33.176 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:32 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3889242347' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch
2026-03-06T22:27:33.176 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:32 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/844342230' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch
2026-03-06T22:27:33.176 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:32 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1610275875' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch
2026-03-06T22:27:33.176 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:32 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/4257925187' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch
2026-03-06T22:27:33.176 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:32 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/1898022942' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch
2026-03-06T22:27:33.176 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:32 vm01 ceph-mon[46942]: from='client.? 192.168.123.101:0/3139926904' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch
2026-03-06T22:27:33.216 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:33.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:32 vm06 ceph-mon[52574]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-03-06T22:27:33.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:32 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3152122952' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch
2026-03-06T22:27:33.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:32 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3889242347' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch
2026-03-06T22:27:33.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:32 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/844342230' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch
2026-03-06T22:27:33.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:32 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/1610275875' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch
2026-03-06T22:27:33.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:32 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/4257925187' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch
2026-03-06T22:27:33.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:32 vm06 ceph-mon[52574]: from='client.? 
192.168.123.101:0/1898022942' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-06T22:27:33.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:32 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/3139926904' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-06T22:27:33.517 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:33.518 INFO:teuthology.orchestra.run.vm01.stderr:dumped all 2026-03-06T22:27:33.566 INFO:teuthology.orchestra.run.vm01.stdout:{"pg_ready":true,"pg_map":{"version":58,"stamp":"2026-03-06T21:27:33.239641+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":218744,"kb_used_data":3628,"kb_used_omap":12,"kb_used_meta":214515,"kb_avail":167520648,"statfs":{"total":171765137408,"available":171541143552,"internally_reserved":0,"allocated":3715072,"data_stored":2300000,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":12711,"internal_metadata":219663961},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"av
ailable":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.001008"},"pg_stats":[{"pgid":"1.0","version":"29'32","reported_seq":57,"reported_epoch":30,"state":"active+clean","last_fresh":"2026-03-06T21:27:22.197881+0000","last_change":"2026-03-06T21:27:20.946363+0000","last_active":"2026-03-06T21:27:22.197881+0000","last_peered":"2026-03-06T21:27:22.197881+0000","last_clean":"2026-03-06T21:27:22.197881+0000","last_became_active":"2026-03-06T21:27:20.946214+0000","last_became_peered":"2026-03-06T21:27:20.946214+0000","last_unstale":"2026-03-06T21:27:22.197881+0000","last_undegraded":"2026-03-06T21:27:22.197881+0000","last_fullsized":"2026-03-06T21:27:22.197881+0000","mapping_epoch":28,"log_start":"0'0","ondisk_log_start":"0'0","created":22,"last_epoch_clean":29,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-06T21:27:13.894315+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-06T21:27:13.894315+0000","last_clean_scrub_stamp":"2026-03-06T21:27:13.894315+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-08T06:11:44.167628+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[6,5,3],"acting":[6,5,3],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":6,"acting_primary":6,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"
num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1388544,"data_stored":1377840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":28,"seq":120259084292,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27124,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940300,"statfs":{"total":21470642176,"available":21442867200,"internally_reserved":0,"allocated":290816,"data_stored":115270,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1585,"internal_metadata":27457999},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":27,"seq":115964116996,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27700,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939724,"statfs":{"total":21470642176,"available":21442277376,"internally_reserved":0,"allocated":753664,"data_stored":574550,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":24,"seq":103079215109,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27708,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939716,"statfs":{"total":21470642176,"available":21442269184,"internally_reserved":0,"allocated":753664,"data_stored":574550,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":22,"seq":94489280517,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27124,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940300,"statfs":{"total":21470642176,"available":21442867200,"internally_reserved":0,"allocated":290816,"data_stored":115270,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":20,"seq":85899345926,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27704,"kb_used_data":736,"kb_used_omap":1,"kb_used_met
a":26814,"kb_avail":20939720,"statfs":{"total":21470642176,"available":21442273280,"internally_reserved":0,"allocated":753664,"data_stored":574550,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":20,"seq":85899345926,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27132,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940292,"statfs":{"total":21470642176,"available":21442859008,"internally_reserved":0,"allocated":290816,"data_stored":115270,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":16,"seq":68719476742,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27128,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940296,"statfs":{"total":21470642176,"available":21442863104,"internally_reserved":0,"allocated":290816,"data_stored":115270,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476742,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27124,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940300,"statfs":{"total":21470642176,"available":21442867200,"internally_reserved":0,"allocated":290816,"data_stored":115270,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-06T22:27:33.566 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image 
harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph pg dump --format=json 2026-03-06T22:27:33.918 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:34.212 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:34.212 INFO:teuthology.orchestra.run.vm01.stderr:dumped all 2026-03-06T22:27:34.277 INFO:teuthology.orchestra.run.vm01.stdout:{"pg_ready":true,"pg_map":{"version":58,"stamp":"2026-03-06T21:27:33.239641+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":218744,"kb_used_data":3628,"kb_used_omap":12,"kb_used_meta":214515,"kb_avail":167520648,"statfs":{"total":171765137408,"available":171541143552,"internally_reserved":0,"allocated":3715072,"data_stored":2300000,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":12711,"internal_metadata":219663961},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserv
ed":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.001008"},"pg_stats":[{"pgid":"1.0","version":"29'32","reported_seq":57,"reported_epoch":30,"state":"active+clean","last_fresh":"2026-03-06T21:27:22.197881+0000","last_change":"2026-03-06T21:27:20.946363+0000","last_active":"2026-03-06T21:27:22.197881+0000","last_peered":"2026-03-06T21:27:22.197881+0000","last_clean":"2026-03-06T21:27:22.197881+0000","last_became_active":"2026-03-06T21:27:20.946214+0000","last_became_peered":"2026-03-06T21:27:20.946214+0000","last_unstale":"2026-03-06T21:27:22.197881+0000","last_undegraded":"2026-03-06T21:27:22.197881+0000","last_fullsized":"2026-03-06T21:27:22.197881+0000","mapping_epoch":28,"log_start":"0'0","ondisk_log_start":"0'0","created":22,"last_epoch_clean":29,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-06T21:27:13.894315+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-06T21:27:13.894315+0000","last_clean_scrub_stamp":"2026-03-06T21:27:13.894315+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-08T06:11:44.167628+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[6,5,3],"acting":[6,5,3],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":6,"acting_primary":6,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"nu
m_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1388544,"data_stored":1377840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":28,"seq":120259084292,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27124,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940300,"statfs":{"total":21470642176,"available":21442867200,"internally_reserved":0,"allocated":290816,"data_stored":115270,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1585,"internal_metadata":27457999},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":27,"seq":115964116996,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27700,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939724,"statfs":{"total":21470642176,"available":21442277376,"internally_reserved":0,"allocated":753664,"data_stored":574550,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":24,"seq":103079215109,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27708,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939716,"statfs":{"total":21470642176,"available":21442269184,"internally_reserved":0,"allocated":753664,"data_stored":574550,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":22,"seq":94489280517,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27124,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940300,"statfs":{"total":21470642176,"available":21442867200,"internally_reserved":0,"allocated":290816,"data_stored":115270,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":20,"seq":85899345926,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27704,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939720,
"statfs":{"total":21470642176,"available":21442273280,"internally_reserved":0,"allocated":753664,"data_stored":574550,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":20,"seq":85899345926,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27132,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940292,"statfs":{"total":21470642176,"available":21442859008,"internally_reserved":0,"allocated":290816,"data_stored":115270,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":16,"seq":68719476742,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27128,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940296,"statfs":{"total":21470642176,"available":21442863104,"internally_reserved":0,"allocated":290816,"data_stored":115270,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476742,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27124,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940300,"statfs":{"total":21470642176,"available":21442867200,"internally_reserved":0,"allocated":290816,"data_stored":115270,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-06T22:27:34.277 INFO:tasks.cephadm.ceph_manager.ceph:clean! 
2026-03-06T22:27:34.277 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-06T22:27:34.277 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-06T22:27:34.277 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph health --format=json 2026-03-06T22:27:34.608 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:34.962 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:34.962 INFO:teuthology.orchestra.run.vm01.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-06T22:27:35.014 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-06T22:27:35.015 INFO:tasks.cephadm:Setup complete, yielding 2026-03-06T22:27:35.015 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-06T22:27:35.017 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm01.local 2026-03-06T22:27:35.017 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch status' 2026-03-06T22:27:35.358 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:35.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:34 vm01 ceph-mon[46942]: pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:35.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:34 vm01 ceph-mon[46942]: from='client.14538 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:27:35.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:34 vm06 ceph-mon[52574]: pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:35.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:34 vm06 ceph-mon[52574]: from='client.14538 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:27:35.677 INFO:teuthology.orchestra.run.vm01.stdout:Backend: cephadm 2026-03-06T22:27:35.677 INFO:teuthology.orchestra.run.vm01.stdout:Available: Yes 2026-03-06T22:27:35.677 INFO:teuthology.orchestra.run.vm01.stdout:Paused: No 2026-03-06T22:27:35.748 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch ps' 2026-03-06T22:27:36.079 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:36.102 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:35 vm01 ceph-mon[46942]: from='client.14542 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:27:36.102 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:35 vm01 ceph-mon[46942]: from='client.? 
192.168.123.101:0/651513025' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-06T22:27:36.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:35 vm06 ceph-mon[52574]: from='client.14542 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:27:36.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:35 vm06 ceph-mon[52574]: from='client.? 192.168.123.101:0/651513025' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-06T22:27:36.408 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-06T22:27:36.408 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.vm01 vm01 *:9093,9094 running (60s) 17s ago 109s 22.5M - 0.25.0 c8568f914cd2 4365d374f776 2026-03-06T22:27:36.408 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter.vm01 vm01 running (117s) 17s ago 117s 8744k - 19.2.3-39-g340d3c24fc6 8bccc98d839a c48ec14a483b 2026-03-06T22:27:36.408 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter.vm06 vm06 running (76s) 17s ago 76s 6865k - 19.2.3-39-g340d3c24fc6 8bccc98d839a 72acd665fd43 2026-03-06T22:27:36.408 INFO:teuthology.orchestra.run.vm01.stdout:crash.vm01 vm01 running (115s) 17s ago 115s 11.2M - 19.2.3-39-g340d3c24fc6 8bccc98d839a c2e845502308 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:crash.vm06 vm06 running (74s) 17s ago 74s 11.1M - 19.2.3-39-g340d3c24fc6 8bccc98d839a d5f5cd20661b 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:grafana.vm01 vm01 *:3000 running (59s) 17s ago 102s 69.4M - 10.4.0 c8b91775d855 193f68923397 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:mgr.vm01.mrlynj vm01 *:9283,8765,8443 running (2m) 17s ago 2m 542M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 73059a4c7a15 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:mgr.vm06.awlziz vm06 *:8443,9283,8765 running (70s) 17s ago 70s 476M - 19.2.3-39-g340d3c24fc6 8bccc98d839a d02607312b6f 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:mon.vm01 vm01 running (2m) 17s ago 2m 48.4M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a 7fb7e5e913b1 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:mon.vm06 vm06 running (69s) 17s ago 69s 41.0M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a d717d212d7fb 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.vm01 vm01 *:9100 running (112s) 17s ago 112s 9424k - 1.7.0 72c9c2088986 ec0df0927f80 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.vm06 vm06 *:9100 running (72s) 17s ago 72s 9256k - 1.7.0 72c9c2088986 b6c10aca64f3 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm06 running (32s) 17s ago 32s 29.7M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 213b5980df4e 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (32s) 17s ago 32s 56.0M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a cbe973f313a3 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (28s) 17s ago 28s 31.3M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a e510991af211 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm06 running (29s) 17s ago 29s 53.0M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a a0c06f5a2620 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (26s) 17s ago 26s 33.1M 4096M 19.2.3-39-g340d3c24fc6 
8bccc98d839a ab91fb94fa67 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm01 running (24s) 17s ago 24s 31.4M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a ef271b290e75 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (22s) 17s ago 22s 40.3M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a bcb18e202ffa 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm01 running (21s) 17s ago 21s 25.6M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 37a8ab69a547 2026-03-06T22:27:36.409 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.vm01 vm01 *:9095 running (57s) 17s ago 95s 31.8M - 2.51.0 1d3b7f56885b aa1d19d527aa 2026-03-06T22:27:36.511 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch ls' 2026-03-06T22:27:36.827 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:37.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:36 vm01 ceph-mon[46942]: pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:37.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:36 vm01 ceph-mon[46942]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:37.165 INFO:teuthology.orchestra.run.vm01.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-06T22:27:37.165 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager ?:9093,9094 1/1 18s ago 2m count:1 2026-03-06T22:27:37.165 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter 2/2 18s ago 2m * 2026-03-06T22:27:37.165 INFO:teuthology.orchestra.run.vm01.stdout:crash 2/2 18s ago 2m * 2026-03-06T22:27:37.165 INFO:teuthology.orchestra.run.vm01.stdout:grafana ?:3000 1/1 18s ago 2m count:1 2026-03-06T22:27:37.165 INFO:teuthology.orchestra.run.vm01.stdout:mgr 2/2 18s ago 2m count:2 2026-03-06T22:27:37.165 INFO:teuthology.orchestra.run.vm01.stdout:mon 2/2 18s ago 113s vm01:192.168.123.101=vm01;vm06:192.168.123.106=vm06;count:2 2026-03-06T22:27:37.165 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter ?:9100 2/2 18s ago 2m * 2026-03-06T22:27:37.165 INFO:teuthology.orchestra.run.vm01.stdout:osd.all-available-devices 8 18s ago 59s * 2026-03-06T22:27:37.165 INFO:teuthology.orchestra.run.vm01.stdout:prometheus ?:9095 1/1 18s ago 2m count:1 2026-03-06T22:27:37.243 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch host ls' 2026-03-06T22:27:37.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:36 vm06 ceph-mon[52574]: pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:37.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:36 vm06 ceph-mon[52574]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:37.566 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:37.887 
INFO:teuthology.orchestra.run.vm01.stdout:HOST ADDR LABELS STATUS 2026-03-06T22:27:37.887 INFO:teuthology.orchestra.run.vm01.stdout:vm01 192.168.123.101 2026-03-06T22:27:37.887 INFO:teuthology.orchestra.run.vm01.stdout:vm06 192.168.123.106 2026-03-06T22:27:37.887 INFO:teuthology.orchestra.run.vm01.stdout:2 hosts in cluster 2026-03-06T22:27:37.961 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch device ls' 2026-03-06T22:27:38.281 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:38.302 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:37 vm01 ceph-mon[46942]: from='client.14554 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:38.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:37 vm06 ceph-mon[52574]: from='client.14554 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:38.598 INFO:teuthology.orchestra.run.vm01.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-06T22:27:38.598 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 16s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-06T22:27:38.598 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdb hdd DWNBRSTVMM01001 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:38.598 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdc hdd DWNBRSTVMM01002 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:38.598 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdd hdd DWNBRSTVMM01003 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:38.599 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vde hdd DWNBRSTVMM01004 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:38.599 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 18s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-06T22:27:38.599 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vdb hdd DWNBRSTVMM06001 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:38.599 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vdc hdd DWNBRSTVMM06002 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:38.599 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vdd hdd DWNBRSTVMM06003 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:38.599 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vde hdd DWNBRSTVMM06004 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:38.667 INFO:teuthology.run_tasks:Running task vip... 2026-03-06T22:27:38.669 INFO:tasks.vip:Allocating static IPs for each host... 
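
[annotation] The vip task lines that follow show the static-IP arithmetic: each host's position inside its DHCP subnet (pos 100 for .101) is re-based into the 12.12.0.0/22 virtual network, giving 12.12.0.101 for vm01 and 12.12.0.106 for vm06. A sketch of that mapping with the values from the log; this is one reading of the printed arithmetic, not teuthology's tasks/vip.py verbatim:

    import ipaddress

    host = ipaddress.ip_address("192.168.123.101")
    net = ipaddress.ip_network("192.168.123.0/24")
    vnet = ipaddress.ip_network("12.12.0.0/22")

    # The off-by-one matches the log reporting "pos 100" for .101.
    pos = int(host) - int(net.network_address) - 1    # -> 100
    static = vnet.network_address + (pos + 1)         # -> 12.12.0.101
    print(pos, static)

The address is then applied exactly as the DEBUG line records, `sudo ip addr add 12.12.0.101/22 dev eth0`, and the test's virtual IP (12.12.1.101 here) is allocated from a higher slice of the same /22.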
2026-03-06T22:27:38.669 INFO:tasks.vip:peername 192.168.123.101 2026-03-06T22:27:38.669 INFO:tasks.vip:192.168.123.101 in 192.168.123.0/24, pos 100 2026-03-06T22:27:38.670 INFO:tasks.vip:vm01.local static 12.12.0.101, vnet 12.12.0.0/22 2026-03-06T22:27:38.670 INFO:tasks.vip:VIPs are [IPv4Address('12.12.1.101')] 2026-03-06T22:27:38.670 DEBUG:teuthology.orchestra.run.vm01:> sudo ip route ls 2026-03-06T22:27:38.699 INFO:teuthology.orchestra.run.vm01.stdout:default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.101 metric 100 2026-03-06T22:27:38.699 INFO:teuthology.orchestra.run.vm01.stdout:192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.101 metric 100 2026-03-06T22:27:38.700 INFO:tasks.vip:Configuring 12.12.0.101 on vm01.local iface eth0... 2026-03-06T22:27:38.700 DEBUG:teuthology.orchestra.run.vm01:> sudo ip addr add 12.12.0.101/22 dev eth0 2026-03-06T22:27:38.766 INFO:tasks.vip:peername 192.168.123.106 2026-03-06T22:27:38.767 INFO:tasks.vip:192.168.123.106 in 192.168.123.0/24, pos 105 2026-03-06T22:27:38.767 INFO:tasks.vip:vm06.local static 12.12.0.106, vnet 12.12.0.0/22 2026-03-06T22:27:38.767 DEBUG:teuthology.orchestra.run.vm06:> sudo ip route ls 2026-03-06T22:27:38.789 INFO:teuthology.orchestra.run.vm06.stdout:default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.106 metric 100 2026-03-06T22:27:38.789 INFO:teuthology.orchestra.run.vm06.stdout:192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.106 metric 100 2026-03-06T22:27:38.790 INFO:tasks.vip:Configuring 12.12.0.106 on vm06.local iface eth0... 2026-03-06T22:27:38.791 DEBUG:teuthology.orchestra.run.vm06:> sudo ip addr add 12.12.0.106/22 dev eth0 2026-03-06T22:27:38.855 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-06T22:27:38.857 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm01.local 2026-03-06T22:27:38.857 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch device ls --refresh' 2026-03-06T22:27:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:38 vm06.local ceph-mon[52574]: from='client.14558 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:38 vm06.local ceph-mon[52574]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:38 vm06.local ceph-mon[52574]: from='client.14562 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:39.169 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:39.191 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:38 vm01.local ceph-mon[46942]: from='client.14558 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:39.191 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:38 vm01.local ceph-mon[46942]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:39.191 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:38 vm01.local ceph-mon[46942]: from='client.14562 -' entity='client.admin' 
cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:39.490 INFO:teuthology.orchestra.run.vm01.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-06T22:27:39.490 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 17s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-06T22:27:39.490 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdb hdd DWNBRSTVMM01001 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:39.490 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdc hdd DWNBRSTVMM01002 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:39.490 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdd hdd DWNBRSTVMM01003 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:39.490 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vde hdd DWNBRSTVMM01004 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:39.490 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 18s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-06T22:27:39.490 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vdb hdd DWNBRSTVMM06001 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:39.490 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vdc hdd DWNBRSTVMM06002 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:39.490 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vdd hdd DWNBRSTVMM06003 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:39.491 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vde hdd DWNBRSTVMM06004 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:27:39.560 INFO:teuthology.run_tasks:Running task vip.exec... 2026-03-06T22:27:39.562 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm01.local 2026-03-06T22:27:39.562 DEBUG:teuthology.orchestra.run.vm01:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'systemctl stop nfs-server' 2026-03-06T22:27:39.588 INFO:teuthology.orchestra.run.vm01.stderr:+ systemctl stop nfs-server 2026-03-06T22:27:39.595 INFO:tasks.vip:Running commands on role host.b host ubuntu@vm06.local 2026-03-06T22:27:39.595 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'systemctl stop nfs-server' 2026-03-06T22:27:39.620 INFO:teuthology.orchestra.run.vm06.stderr:+ systemctl stop nfs-server 2026-03-06T22:27:39.626 INFO:teuthology.run_tasks:Running task cephadm.shell... 
2026-03-06T22:27:39.629 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm01.local 2026-03-06T22:27:39.630 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph fs volume create foofs' 2026-03-06T22:27:40.047 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:40.134 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:39 vm01.local ceph-mon[46942]: from='client.24331 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:40.134 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:39 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:27:40.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:39 vm06.local ceph-mon[52574]: from='client.24331 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:40.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:39 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:27:41.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:40 vm01.local ceph-mon[46942]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:41.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:40 vm01.local ceph-mon[46942]: from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:41.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:40 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:41.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:40 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:41.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:40 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]: dispatch 2026-03-06T22:27:41.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:40 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:41.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:40 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:41.202 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:40 vm06.local ceph-mon[52574]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:41.202 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:40 vm06.local ceph-mon[52574]: from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:41.202 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:40 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' 
entity='mgr.vm01.mrlynj' 2026-03-06T22:27:41.202 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:40 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:41.202 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:40 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]: dispatch 2026-03-06T22:27:41.202 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:40 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:41.202 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:40 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:42.024 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:42 vm01.local ceph-mon[46942]: from='client.14574 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "foofs", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:42.024 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:42 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:42.024 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:42 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:42.024 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:42 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:42.024 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:42 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:42.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:42 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]': finished 2026-03-06T22:27:42.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:42 vm01.local ceph-mon[46942]: osdmap e31: 8 total, 8 up, 8 in 2026-03-06T22:27:42.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:42 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.foofs.data"}]: dispatch 2026-03-06T22:27:42.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:42 vm06.local ceph-mon[52574]: from='client.14574 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "foofs", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:27:42.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:42 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:42.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:42 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:42.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:42 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:42.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:42 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:42.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:42 vm06.local ceph-mon[52574]: 
from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pool create", "pool": "cephfs.foofs.meta"}]': finished 2026-03-06T22:27:42.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:42 vm06.local ceph-mon[52574]: osdmap e31: 8 total, 8 up, 8 in 2026-03-06T22:27:42.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:42 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.foofs.data"}]: dispatch 2026-03-06T22:27:42.575 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph nfs cluster create foo --ingress --virtual-ip 12.12.1.101/22 --port 2999' 2026-03-06T22:27:42.804 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:42 vm01.local ceph-c76e688a-19a2-11f1-bdea-01160fc6f239-mon-vm01[46938]: 2026-03-06T21:27:42.452+0000 7f59eff27640 -1 log_channel(cluster) log [ERR] : Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-06T22:27:42.949 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:43.065 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.foofs.data"}]': finished 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: osdmap e32: 8 total, 8 up, 8 in 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]: dispatch 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX) 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]': finished 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: osdmap e33: 8 total, 8 up, 8 in 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 
22:27:43 vm01.local ceph-mon[46942]: fsmap foofs:0 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm06.aeobze", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm06.aeobze", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-06T22:27:43.066 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:27:43.287 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-03-06T22:27:43.287 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.foofs.data"}]': finished 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: osdmap e32: 8 total, 8 up, 8 in 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: 
from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]: dispatch 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX) 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "fs new", "fs_name": "foofs", "metadata": "cephfs.foofs.meta", "data": "cephfs.foofs.data"}]': finished 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: osdmap e33: 8 total, 8 up, 8 in 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: fsmap foofs:0 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm06.aeobze", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm06.aeobze", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-06T22:27:43.288 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
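
[annotation] The mon audit entries above spell out what the single `ceph fs volume create foofs` expands to: the mgr creates cephfs.foofs.meta, creates cephfs.foofs.data with the bulk flag, runs `fs new`, and then hands an mds.foofs spec to the orchestrator (the "Saving service mds.foofs spec with placement count:2" and "Deploying daemon" entries that follow). A sketch of the same sequence issued by hand through this job's cephadm shell; the `ceph()` wrapper is illustrative:

    import subprocess

    FSID = "c76e688a-19a2-11f1-bdea-01160fc6f239"
    IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5"

    def ceph(*args):
        # Mirrors the harness invocation: every command runs in a cephadm shell.
        subprocess.run(
            ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
             "shell", "--fsid", FSID, "--", "ceph", *args],
            check=True)

    # The steps the mgr dispatched on behalf of `ceph fs volume create foofs`,
    # in the order the mon audit log records them:
    ceph("osd", "pool", "create", "cephfs.foofs.meta")
    ceph("osd", "pool", "create", "cephfs.foofs.data", "--bulk")
    ceph("fs", "new", "foofs", "cephfs.foofs.meta", "cephfs.foofs.data")
    # ...after which the orchestrator deploys mds.foofs.vm06.aeobze and
    # mds.foofs.vm01.aitcjt, clearing the transient MDS_ALL_DOWN /
    # MDS_UP_LESS_THAN_MAX health checks (both are in this job's ignorelist).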
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: Saving service mds.foofs spec with placement count:2
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: Deploying daemon mds.foofs.vm06.aeobze on vm06
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: pgmap v66: 65 pgs: 3 creating+peering, 46 unknown, 16 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='client.14578 -' entity='client.admin' cmd=[{"prefix": "nfs cluster create", "cluster_id": "foo", "ingress": true, "virtual_ip": "12.12.1.101/22", "port": 2999, "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]: dispatch
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]': finished
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: osdmap e34: 8 total, 8 up, 8 in
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]: dispatch
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:44.039 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:44.040 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:44.040 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm01.aitcjt", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch
2026-03-06T22:27:44.040 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm01.aitcjt", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished
2026-03-06T22:27:44.291 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: Saving service mds.foofs spec with placement count:2
2026-03-06T22:27:44.291 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: Deploying daemon mds.foofs.vm06.aeobze on vm06
2026-03-06T22:27:44.291 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: pgmap v66: 65 pgs: 3 creating+peering, 46 unknown, 16 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-03-06T22:27:44.291 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:44.291 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='client.14578 -' entity='client.admin' cmd=[{"prefix": "nfs cluster create", "cluster_id": "foo", "ingress": true, "virtual_ip": "12.12.1.101/22", "port": 2999, "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]: dispatch
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]': finished
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: osdmap e34: 8 total, 8 up, 8 in
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]: dispatch
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm01.aitcjt", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "mds.foofs.vm01.aitcjt", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:44.292 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:44 vm01.local ceph-mon[46942]: Deploying daemon mds.foofs.vm01.aitcjt on vm01
2026-03-06T22:27:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:44.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:44 vm06.local ceph-mon[52574]: Deploying daemon mds.foofs.vm01.aitcjt on vm01
2026-03-06T22:27:44.707 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake'
2026-03-06T22:27:45.140 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]': finished
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: osdmap e35: 8 total, 8 up, 8 in
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: mds.? [v2:192.168.123.106:6832/122259778,v1:192.168.123.106:6833/122259778] up:boot
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: daemon mds.foofs.vm06.aeobze assigned to filesystem foofs as rank 0 (now has 1 ranks)
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline)
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds)
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: Cluster is now healthy
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: fsmap foofs:0 1 up:standby
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mds metadata", "who": "foofs.vm06.aeobze"}]: dispatch
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: Saving service nfs.foo spec with placement count:1
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: fsmap foofs:1 {0=foofs.vm06.aeobze=up:creating}
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: daemon mds.foofs.vm06.aeobze is now active in filesystem foofs as rank 0
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: Saving service ingress.nfs.foo spec with placement count:2
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.493 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.644 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-03-06T22:27:45.644 INFO:teuthology.orchestra.run.vm01.stdout: "bind": "/fake",
2026-03-06T22:27:45.644 INFO:teuthology.orchestra.run.vm01.stdout: "cluster": "foo",
2026-03-06T22:27:45.644 INFO:teuthology.orchestra.run.vm01.stdout: "fs": "foofs",
2026-03-06T22:27:45.644 INFO:teuthology.orchestra.run.vm01.stdout: "mode": "RW",
2026-03-06T22:27:45.644 INFO:teuthology.orchestra.run.vm01.stdout: "path": "/"
2026-03-06T22:27:45.644 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-03-06T22:27:45.653 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]': finished
2026-03-06T22:27:45.653 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: osdmap e35: 8 total, 8 up, 8 in
2026-03-06T22:27:45.653 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: mds.? [v2:192.168.123.106:6832/122259778,v1:192.168.123.106:6833/122259778] up:boot
2026-03-06T22:27:45.653 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: daemon mds.foofs.vm06.aeobze assigned to filesystem foofs as rank 0 (now has 1 ranks)
2026-03-06T22:27:45.653 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline)
2026-03-06T22:27:45.653 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds)
2026-03-06T22:27:45.653 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: Cluster is now healthy
2026-03-06T22:27:45.653 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: fsmap foofs:0 1 up:standby
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mds metadata", "who": "foofs.vm06.aeobze"}]: dispatch
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: Saving service nfs.foo spec with placement count:1
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: fsmap foofs:1 {0=foofs.vm06.aeobze=up:creating}
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: daemon mds.foofs.vm06.aeobze is now active in filesystem foofs as rank 0
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: Saving service ingress.nfs.foo spec with placement count:2
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:45 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:27:45.709 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service...
2026-03-06T22:27:45.711 INFO:tasks.cephadm:Waiting for ceph service nfs.foo to start (timeout 300)...
2026-03-06T22:27:45.712 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json
2026-03-06T22:27:45.745 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.745 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.745 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:45.745 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:45 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:27:46.134 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:46.493 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:46.493 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:41.014477Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:41.014345Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:41.014390Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:41.014527Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:44.581851Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 1, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:41.014299Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:41.014228Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:44.576918Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:41.014434Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:41.014593Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:41.014566Z", "ports": [9095], "running": 1, "size": 1}}]
2026-03-06T22:27:46.565 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: pgmap v69: 97 pgs: 13 creating+peering, 20 unknown, 64 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 0 B/s wr, 0 op/s
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: osdmap e36: 8 total, 8 up, 8 in
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: from='client.14586 -' entity='client.admin' cmd=[{"prefix": "nfs export create cephfs", "fsname": "foofs", "cluster_id": "foo", "pseudo_path": "/fake", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: mds.? [v2:192.168.123.106:6832/122259778,v1:192.168.123.106:6833/122259778] up:active
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: mds.? [v2:192.168.123.101:6834/2763032966,v1:192.168.123.101:6835/2763032966] up:boot
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: fsmap foofs:1 {0=foofs.vm06.aeobze=up:active} 1 up:standby
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mds metadata", "who": "foofs.vm01.aitcjt"}]: dispatch
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: fsmap foofs:1 {0=foofs.vm06.aeobze=up:active} 1 up:standby
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]': finished
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: mgrmap e20: vm01.mrlynj(active, since 92s), standbys: vm06.awlziz
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:46.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: pgmap v69: 97 pgs: 13 creating+peering, 20 unknown, 64 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 0 B/s wr, 0 op/s
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: osdmap e36: 8 total, 8 up, 8 in
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: from='client.14586 -' entity='client.admin' cmd=[{"prefix": "nfs export create cephfs", "fsname": "foofs", "cluster_id": "foo", "pseudo_path": "/fake", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: mds.? [v2:192.168.123.106:6832/122259778,v1:192.168.123.106:6833/122259778] up:active
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: mds.? [v2:192.168.123.101:6834/2763032966,v1:192.168.123.101:6835/2763032966] up:boot
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: fsmap foofs:1 {0=foofs.vm06.aeobze=up:active} 1 up:standby
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "mds metadata", "who": "foofs.vm01.aitcjt"}]: dispatch
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: fsmap foofs:1 {0=foofs.vm06.aeobze=up:active} 1 up:standby
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]': finished
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.foofs.94ac2614", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo, allow rw tag cephfs data=foofs", "mds", "allow rw path=/"], "format": "json"}]: dispatch
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: mgrmap e20: vm01.mrlynj(active, since 92s), standbys: vm06.awlziz
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:46.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:47.566 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='client.14596 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: Creating key for client.nfs.foo.0.0.vm01.mkzqsn
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm01.mkzqsn", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm01.mkzqsn", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: Ensuring nfs.foo.0 is in the ganesha grace table
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: Rados config object exists: conf-nfs.foo
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: Creating key for client.nfs.foo.0.0.vm01.mkzqsn-rgw
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm01.mkzqsn-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm01.mkzqsn-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: Bind address in nfs.foo.0.0.vm01.mkzqsn's ganesha conf is defaulting to empty
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:47.811 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:47 vm01.local ceph-mon[46942]: Deploying daemon nfs.foo.0.0.vm01.mkzqsn on vm01
2026-03-06T22:27:47.985 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='client.14596 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: Creating key for client.nfs.foo.0.0.vm01.mkzqsn
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm01.mkzqsn", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm01.mkzqsn", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: Ensuring nfs.foo.0 is in the ganesha grace table
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: Rados config object exists: conf-nfs.foo
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: Creating key for client.nfs.foo.0.0.vm01.mkzqsn-rgw
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm01.mkzqsn-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm01.mkzqsn-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: Bind address in nfs.foo.0.0.vm01.mkzqsn's ganesha conf is defaulting to empty
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T22:27:48.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:47 vm06.local ceph-mon[52574]: Deploying daemon nfs.foo.0.0.vm01.mkzqsn on vm01
2026-03-06T22:27:48.327 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:48.327 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:45.937314Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:45.937362Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:44.581851Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:45.937420Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:45.937448Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:45.937392Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:45.937475Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}]
2026-03-06T22:27:48.381 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-06T22:27:49.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:48 vm01.local ceph-mon[46942]: pgmap v71: 97 pgs: 13 creating+peering, 7 unknown, 77 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 214 B/s wr, 0 op/s
2026-03-06T22:27:49.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:48 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:48 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:48 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:48 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:48 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:48 vm01.local ceph-mon[46942]: mgrmap e21: vm01.mrlynj(active, since 94s), standbys: vm06.awlziz
2026-03-06T22:27:49.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:48 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:48 vm01.local ceph-mon[46942]: Deploying daemon haproxy.nfs.foo.vm01.ghmxpc on vm01
2026-03-06T22:27:49.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:48 vm06.local ceph-mon[52574]: pgmap v71: 97 pgs: 13 creating+peering, 7 unknown, 77 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 214 B/s wr, 0 op/s
2026-03-06T22:27:49.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:48 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:48 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:48 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:48 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:48 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:48 vm06.local ceph-mon[52574]: mgrmap e21: vm01.mrlynj(active, since 94s), standbys: vm06.awlziz
2026-03-06T22:27:49.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:48 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:49.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:48 vm06.local ceph-mon[52574]: Deploying daemon haproxy.nfs.foo.vm01.ghmxpc on vm01
2026-03-06T22:27:49.382 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json
2026-03-06T22:27:49.706 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:50.022 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:49 vm01.local ceph-mon[46942]: from='client.14616 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-06T22:27:50.022 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:49 vm01.local ceph-mon[46942]: mds.? [v2:192.168.123.106:6832/122259778,v1:192.168.123.106:6833/122259778] up:active
2026-03-06T22:27:50.022 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:49 vm01.local ceph-mon[46942]: fsmap foofs:1 {0=foofs.vm06.aeobze=up:active} 1 up:standby
2026-03-06T22:27:50.053 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:50.053 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:45.937314Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:45.937362Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:44.581851Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:45.937420Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:45.937448Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:45.937392Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:45.937475Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}]
2026-03-06T22:27:50.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:49 vm06.local ceph-mon[52574]: from='client.14616 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-06T22:27:50.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:49 vm06.local ceph-mon[52574]: mds.? [v2:192.168.123.106:6832/122259778,v1:192.168.123.106:6833/122259778] up:active
2026-03-06T22:27:50.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:49 vm06.local ceph-mon[52574]: fsmap foofs:1 {0=foofs.vm06.aeobze=up:active} 1 up:standby
2026-03-06T22:27:50.179 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-06T22:27:51.122 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:50 vm01.local ceph-mon[46942]: pgmap v72: 97 pgs: 97 active+clean; 452 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 3.0 KiB/s wr, 9 op/s
2026-03-06T22:27:51.122 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:50 vm01.local ceph-mon[46942]: mds.? [v2:192.168.123.101:6834/2763032966,v1:192.168.123.101:6835/2763032966] up:standby
2026-03-06T22:27:51.122 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:50 vm01.local ceph-mon[46942]: fsmap foofs:1 {0=foofs.vm06.aeobze=up:active} 1 up:standby
2026-03-06T22:27:51.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:50 vm06.local ceph-mon[52574]: pgmap v72: 97 pgs: 97 active+clean; 452 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 3.0 KiB/s wr, 9 op/s
2026-03-06T22:27:51.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:50 vm06.local ceph-mon[52574]: mds.? [v2:192.168.123.101:6834/2763032966,v1:192.168.123.101:6835/2763032966] up:standby
2026-03-06T22:27:51.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:50 vm06.local ceph-mon[52574]: fsmap foofs:1 {0=foofs.vm06.aeobze=up:active} 1 up:standby
2026-03-06T22:27:51.180 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json
2026-03-06T22:27:51.625 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:52.021 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:52.021 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:45.937314Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:45.937362Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:51.989473Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:45.937420Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:45.937448Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:45.937392Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:45.937475Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}]
2026-03-06T22:27:52.021 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:51 vm01.local ceph-mon[46942]: from='client.14620 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-06T22:27:52.110 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-06T22:27:52.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:51 vm06.local ceph-mon[52574]: from='client.14620 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-06T22:27:53.111 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json
2026-03-06T22:27:53.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:52 vm01.local ceph-mon[46942]: pgmap v73: 97 pgs: 97 active+clean; 452 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 3.3 KiB/s wr, 9 op/s
2026-03-06T22:27:53.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:52 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:53.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:52 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:53.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:52 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:53.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:52 vm01.local ceph-mon[46942]: Deploying daemon haproxy.nfs.foo.vm06.kpjmdj on vm06
2026-03-06T22:27:53.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:52 vm01.local ceph-mon[46942]: from='client.14628 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-06T22:27:53.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:52 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:53.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:52 vm06.local ceph-mon[52574]: pgmap v73: 97 pgs: 97 active+clean; 452 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 3.3 KiB/s wr, 9 op/s
2026-03-06T22:27:53.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:52 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:53.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:52 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:53.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:52 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:53.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:52 vm06.local ceph-mon[52574]: Deploying daemon haproxy.nfs.foo.vm06.kpjmdj on vm06
2026-03-06T22:27:53.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:52 vm06.local ceph-mon[52574]: from='client.14628 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-06T22:27:53.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:52 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:27:53.426 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:27:53.737 INFO:teuthology.orchestra.run.vm01.stdout:
2026-03-06T22:27:53.737 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:45.937314Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:45.937362Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:51.989473Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:45.937420Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:45.937448Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:45.937392Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:45.937475Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}]
2026-03-06T22:27:53.804 INFO:tasks.cephadm:nfs.foo has 0/1
2026-03-06T22:27:54.806 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json
2026-03-06T22:27:55.108 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:54 vm01.local ceph-mon[46942]: pgmap v74: 97 pgs: 97 active+clean; 452 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 3.0 KiB/s wr, 8 op/s
2026-03-06T22:27:55.109
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:54 vm01.local ceph-mon[46942]: from='client.14632 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:27:55.155 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:55.336 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:54 vm06.local ceph-mon[52574]: pgmap v74: 97 pgs: 97 active+clean; 452 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 3.0 KiB/s wr, 8 op/s 2026-03-06T22:27:55.336 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:54 vm06.local ceph-mon[52574]: from='client.14632 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:27:55.482 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:55.482 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:45.937314Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:45.937362Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:51.989473Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:45.937420Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", 
"last_refresh": "2026-03-06T21:27:45.937448Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:45.937392Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:45.937475Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-06T22:27:55.545 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-06T22:27:56.545 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json 2026-03-06T22:27:56.859 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:56.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:56 vm01.local ceph-mon[46942]: pgmap v75: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 2.9 KiB/s wr, 7 op/s 2026-03-06T22:27:56.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:56 vm01.local ceph-mon[46942]: from='client.14636 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:27:56.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:56 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:56.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:56 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:56.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:56 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:56.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:56 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:56.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:56 vm01.local ceph-mon[46942]: 12.12.1.101 is in 12.12.0.0/22 on vm06 interface eth0 2026-03-06T22:27:56.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:56 vm01.local ceph-mon[46942]: 12.12.1.101 is in 12.12.0.0/22 
on vm01 interface eth0 2026-03-06T22:27:56.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:56 vm01.local ceph-mon[46942]: Deploying daemon keepalived.nfs.foo.vm06.vgfefr on vm06 2026-03-06T22:27:57.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:56 vm06.local ceph-mon[52574]: pgmap v75: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 2.9 KiB/s wr, 7 op/s 2026-03-06T22:27:57.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:56 vm06.local ceph-mon[52574]: from='client.14636 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:27:57.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:56 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:57.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:56 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:57.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:56 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:57.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:56 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:57.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:56 vm06.local ceph-mon[52574]: 12.12.1.101 is in 12.12.0.0/22 on vm06 interface eth0 2026-03-06T22:27:57.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:56 vm06.local ceph-mon[52574]: 12.12.1.101 is in 12.12.0.0/22 on vm01 interface eth0 2026-03-06T22:27:57.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:56 vm06.local ceph-mon[52574]: Deploying daemon keepalived.nfs.foo.vm06.vgfefr on vm06 2026-03-06T22:27:57.177 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:57.177 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:45.937314Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:45.937362Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:55.665853Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": 
"12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:45.937420Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:45.937448Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:45.937392Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:45.937475Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-06T22:27:57.244 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-06T22:27:58.245 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json 2026-03-06T22:27:58.590 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:27:58.872 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:58 vm01.local ceph-mon[46942]: from='client.14640 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:27:58.872 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:58 vm01.local ceph-mon[46942]: pgmap v76: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 2.5 
KiB/s wr, 6 op/s 2026-03-06T22:27:58.872 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:58 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:58.872 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:58 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:58.872 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:58 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:27:58.910 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:27:58.910 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:45.937314Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:45.937362Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:55.665853Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:45.937420Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:45.937448Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": 
"foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:45.937392Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:45.937475Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-06T22:27:58.980 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-06T22:27:59.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:58 vm06.local ceph-mon[52574]: from='client.14640 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:27:59.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:58 vm06.local ceph-mon[52574]: pgmap v76: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 2.5 KiB/s wr, 6 op/s 2026-03-06T22:27:59.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:58 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:59.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:58 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:27:59.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:58 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:27:59.981 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json 2026-03-06T22:28:00.006 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:27:59 vm01.local ceph-mon[46942]: from='client.24383 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:00.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:27:59 vm06.local ceph-mon[52574]: from='client.24383 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:00.383 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:28:00.890 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:28:00.890 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": 
{"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:45.937314Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:45.937362Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:59.962050Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:45.937420Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:45.937448Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:45.937392Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", 
"spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:45.937475Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-06T22:28:00.943 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-06T22:28:01.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:00 vm01.local ceph-mon[46942]: pgmap v77: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 2.3 KiB/s wr, 6 op/s 2026-03-06T22:28:01.372 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:00 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:01.372 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:00 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:01.372 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:00 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:01.372 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:00 vm01.local ceph-mon[46942]: 12.12.1.101 is in 12.12.0.0/22 on vm01 interface eth0 2026-03-06T22:28:01.372 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:00 vm01.local ceph-mon[46942]: 12.12.1.101 is in 12.12.0.0/22 on vm06 interface eth0 2026-03-06T22:28:01.372 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:00 vm01.local ceph-mon[46942]: Deploying daemon keepalived.nfs.foo.vm01.nicgzx on vm01 2026-03-06T22:28:01.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:00 vm06.local ceph-mon[52574]: pgmap v77: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 2.3 KiB/s wr, 6 op/s 2026-03-06T22:28:01.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:00 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:01.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:00 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:01.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:00 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:01.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:00 vm06.local ceph-mon[52574]: 12.12.1.101 is in 12.12.0.0/22 on vm01 interface eth0 2026-03-06T22:28:01.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:00 vm06.local ceph-mon[52574]: 12.12.1.101 is in 12.12.0.0/22 on vm06 interface eth0 2026-03-06T22:28:01.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:00 vm06.local ceph-mon[52574]: Deploying daemon keepalived.nfs.foo.vm01.nicgzx on vm01 2026-03-06T22:28:01.943 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json 2026-03-06T22:28:02.284 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config 
/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:28:02.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:01 vm01.local ceph-mon[46942]: from='client.14648 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:02.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:01 vm06.local ceph-mon[52574]: from='client.14648 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:02.681 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:28:02.681 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:45.937314Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:45.937362Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:59.962050Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:45.937420Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:45.937448Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, 
"status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:45.937392Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:45.937475Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-06T22:28:02.776 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-06T22:28:03.582 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:03 vm06.local ceph-mon[52574]: pgmap v78: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 938 B/s wr, 1 op/s 2026-03-06T22:28:03.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:03 vm01.local ceph-mon[46942]: pgmap v78: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 938 B/s wr, 1 op/s 2026-03-06T22:28:03.776 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json 2026-03-06T22:28:04.237 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:28:04.527 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:04 vm01.local ceph-mon[46942]: from='client.14652 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:04.527 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:04 vm01.local ceph-mon[46942]: pgmap v79: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 255 B/s wr, 0 op/s 2026-03-06T22:28:04.623 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:28:04.623 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:45.937314Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": 
{"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:45.937362Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:27:59.962050Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "ports": [2999, 9999], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:45.937583Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:45.937420Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:45.937448Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:27:45.937392Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:45.937475Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-06T22:28:04.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:04 vm06.local ceph-mon[52574]: from='client.14652 -' entity='client.admin' 
cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:04.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:04 vm06.local ceph-mon[52574]: pgmap v79: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 255 B/s wr, 0 op/s 2026-03-06T22:28:04.797 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-06T22:28:05.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:05 vm01.local ceph-mon[46942]: from='client.14656 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:05.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:05 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:05.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:05 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:05.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:05 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:05.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:05 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:05.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:05 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:05.569 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:05 vm06.local ceph-mon[52574]: from='client.14656 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:05.570 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:05 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:05.570 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:05 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:05.570 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:05 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:05.570 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:05 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:05.570 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:05 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:05.798 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json 2026-03-06T22:28:06.239 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:28:06.453 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:06 vm06.local ceph-mon[52574]: pgmap v80: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 255 B/s wr, 0 op/s 2026-03-06T22:28:06.526 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:06 
vm01.local ceph-mon[46942]: pgmap v80: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 255 B/s wr, 0 op/s 2026-03-06T22:28:06.685 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:28:06.685 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:27:46.657830Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:27:46.657720Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:27:46.657754Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:27:46.657863Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:28:05.109032Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "last_refresh": "2026-03-06T21:28:06.476471Z", "ports": [2999, 9999], "running": 2, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:27:46.658043Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:27:46.657684Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:27:46.657628Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:27:47.786646Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "ports": [12999], "running": 0, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", 
"last_refresh": "2026-03-06T21:27:46.657786Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:27:46.657924Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:27:46.657893Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-06T22:28:06.750 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-06T22:28:07.484 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:07 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.485 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:07 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.485 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:07 vm01.local ceph-mon[46942]: from='client.24393 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:07.485 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:07 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.485 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:07 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.485 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:07 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:07.485 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:07 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:07.485 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:07 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.485 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:07 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:07.485 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:07 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.751 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json 2026-03-06T22:28:07.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:07 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.904 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:07 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:07 vm06.local ceph-mon[52574]: from='client.24393 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:07.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:07 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:07 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:07 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:07.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:07 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:07.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:07 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:07.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:07 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:07.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:07 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:08.201 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:28:08.573 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:28:08.573 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:28:07.177844Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:28:06.476155Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:28:06.476210Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:28:07.177875Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:28:05.109032Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": 
"ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "last_refresh": "2026-03-06T21:28:06.476471Z", "ports": [2999, 9999], "running": 4, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:28:06.476444Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:28:06.476277Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:28:06.476306Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:28:07.402922Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "last_refresh": "2026-03-06T21:28:07.178091Z", "ports": [12999], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:28:06.476246Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:28:06.476335Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "last_refresh": "2026-03-06T21:28:07.177908Z", "ports": [9095], "running": 1, "size": 1}}] 2026-03-06T22:28:08.689 INFO:tasks.cephadm:nfs.foo has 1/1 2026-03-06T22:28:08.690 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-06T22:28:08.692 INFO:tasks.cephadm:Waiting for ceph service ingress.nfs.foo to start (timeout 300)... 
2026-03-06T22:28:08.692 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- ceph orch ls -f json 2026-03-06T22:28:08.857 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:08 vm01.local ceph-mon[46942]: pgmap v81: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-06T22:28:08.857 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:08 vm01.local ceph-mon[46942]: Reconfiguring prometheus.vm01 (dependencies changed)... 2026-03-06T22:28:08.857 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:08 vm01.local ceph-mon[46942]: Reconfiguring daemon prometheus.vm01 on vm01 2026-03-06T22:28:08.857 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:08 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:08.857 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:08 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:08.857 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:08 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:08.857 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:08 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T22:28:08.858 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:08 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:09.056 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:28:09.104 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:08 vm06.local ceph-mon[52574]: pgmap v81: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-06T22:28:09.104 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:08 vm06.local ceph-mon[52574]: Reconfiguring prometheus.vm01 (dependencies changed)... 
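For reference, the ingress.nfs.foo entry in the orch ls output above decodes to: haproxy fronting the nfs.foo ganesha backend on frontend_port 2999, a status endpoint on monitor_port 9999, and keepalived holding virtual_ip 12.12.1.101/22 across the two placement hosts. A sketch of that spec as a Python dict with the same keys (values are taken from the output above; whether `ceph orch apply -i` accepts exactly this serialization is an assumption):

    import json

    # Assumed spec layout, reconstructed from the "ingress.nfs.foo" entry above.
    ingress_spec = {
        'service_type': 'ingress',
        'service_id': 'nfs.foo',
        'placement': {'count': 2},
        'spec': {
            'backend_service': 'nfs.foo',      # the ganesha daemons haproxy forwards to
            'frontend_port': 2999,             # port clients mount against
            'monitor_port': 9999,              # haproxy status endpoint
            'virtual_ip': '12.12.1.101/22',    # keepalived-managed VIP
            'first_virtual_router_id': 50,
        },
    }
    print(json.dumps(ingress_spec, indent=2))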
2026-03-06T22:28:09.104 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:08 vm06.local ceph-mon[52574]: Reconfiguring daemon prometheus.vm01 on vm01 2026-03-06T22:28:09.104 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:08 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:09.104 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:08 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:09.104 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:08 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:09.104 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:08 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T22:28:09.104 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:08 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:09.439 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-06T22:28:09.439 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T21:25:19.086941Z", "last_refresh": "2026-03-06T21:28:07.177844Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:20.423043Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T21:25:17.291925Z", "last_refresh": "2026-03-06T21:28:06.476155Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:21.570169Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T21:25:16.839966Z", "last_refresh": "2026-03-06T21:28:06.476210Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T21:25:18.237488Z", "last_refresh": "2026-03-06T21:28:07.177875Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:28:05.109032Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2999, "monitor_port": 9999, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-03-06T21:27:44.577886Z", "last_refresh": "2026-03-06T21:28:06.476471Z", "ports": [2999, 9999], "running": 4, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-03-06T21:27:44.904465Z service:mds.foofs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foofs", "service_name": "mds.foofs", "service_type": "mds", "status": {"created": "2026-03-06T21:27:42.485113Z", "last_refresh": "2026-03-06T21:28:06.476444Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:25.744099Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": 
{"created": "2026-03-06T21:25:16.395746Z", "last_refresh": "2026-03-06T21:28:06.476277Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:27.211337Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm06:192.168.123.106=vm06"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T21:25:43.655237Z", "last_refresh": "2026-03-06T21:28:06.476306Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T21:28:07.402922Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12999}, "status": {"created": "2026-03-06T21:27:44.547509Z", "last_refresh": "2026-03-06T21:28:07.178091Z", "ports": [12999], "running": 1, "size": 1}}, {"events": ["2026-03-06T21:26:24.376042Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T21:25:18.662175Z", "last_refresh": "2026-03-06T21:28:06.476246Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T21:26:37.378094Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T21:26:37.374090Z", "last_refresh": "2026-03-06T21:28:06.476335Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T21:26:27.214321Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T21:25:17.733386Z", "ports": [9095], "running": 0, "size": 1}}] 2026-03-06T22:28:09.620 INFO:tasks.cephadm:ingress.nfs.foo has 4/4 2026-03-06T22:28:09.620 INFO:teuthology.run_tasks:Running task vip.exec... 2026-03-06T22:28:09.622 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm01.local 2026-03-06T22:28:09.622 DEBUG:teuthology.orchestra.run.vm01:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'mkdir /mnt/foo' 2026-03-06T22:28:09.664 INFO:teuthology.orchestra.run.vm01.stderr:+ mkdir /mnt/foo 2026-03-06T22:28:09.666 DEBUG:teuthology.orchestra.run.vm01:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'sleep 5' 2026-03-06T22:28:09.747 INFO:teuthology.orchestra.run.vm01.stderr:+ sleep 5 2026-03-06T22:28:10.013 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:09 vm01.local ceph-mon[46942]: from='client.14664 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:10.014 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:09 vm01.local ceph-mon[46942]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T22:28:10.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:09 vm06.local ceph-mon[52574]: from='client.14664 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:10.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:09 vm06.local ceph-mon[52574]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: pgmap v82: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: from='client.14668 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:11.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:10 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: pgmap v82: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-06T22:28:11.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: from='client.14668 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T22:28:11.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.405 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:11.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:11.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:11.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:11.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:10 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:13.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:12 vm01.local ceph-mon[46942]: pgmap v83: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-06T22:28:13.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:12 vm06.local ceph-mon[52574]: pgmap v83: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-06T22:28:14.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:13 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.b", "id": [0, 6]}]: dispatch 2026-03-06T22:28:14.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:13 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.2", "id": [1, 0]}]: dispatch 2026-03-06T22:28:14.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:13 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 0]}]: dispatch 2026-03-06T22:28:14.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:13 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.14", "id": [1, 7]}]: dispatch 2026-03-06T22:28:14.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:13 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:28:14.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:13 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.b", "id": [0, 6]}]: dispatch 2026-03-06T22:28:14.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:13 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.2", "id": [1, 0]}]: dispatch 2026-03-06T22:28:14.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:13 vm06.local ceph-mon[52574]: 
from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 0]}]: dispatch 2026-03-06T22:28:14.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:13 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.14", "id": [1, 7]}]: dispatch 2026-03-06T22:28:14.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:13 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:28:14.749 DEBUG:teuthology.orchestra.run.vm01:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'mount -t nfs 12.12.1.101:/fake /mnt/foo -o port=2999' 2026-03-06T22:28:14.814 INFO:teuthology.orchestra.run.vm01.stderr:+ mount -t nfs 12.12.1.101:/fake /mnt/foo -o port=2999 2026-03-06T22:28:15.006 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:14 vm01.local ceph-mon[46942]: pgmap v84: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-06T22:28:15.006 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:14 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.b", "id": [0, 6]}]': finished 2026-03-06T22:28:15.006 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:14 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.2", "id": [1, 0]}]': finished 2026-03-06T22:28:15.006 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:14 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 0]}]': finished 2026-03-06T22:28:15.006 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:14 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.14", "id": [1, 7]}]': finished 2026-03-06T22:28:15.006 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:14 vm01.local ceph-mon[46942]: osdmap e37: 8 total, 8 up, 8 in 2026-03-06T22:28:15.014 DEBUG:teuthology.orchestra.run.vm01:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'echo test > /mnt/foo/testfile' 2026-03-06T22:28:15.077 INFO:teuthology.orchestra.run.vm01.stderr:+ echo test 2026-03-06T22:28:15.111 DEBUG:teuthology.orchestra.run.vm01:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c sync 2026-03-06T22:28:15.174 INFO:teuthology.orchestra.run.vm01.stderr:+ sync 2026-03-06T22:28:15.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:14 vm06.local ceph-mon[52574]: pgmap v84: 97 pgs: 97 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 0 op/s 2026-03-06T22:28:15.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:14 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.b", "id": [0, 6]}]': finished 2026-03-06T22:28:15.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:14 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": 
"osd pg-upmap-items", "format": "json", "pgid": "4.2", "id": [1, 0]}]': finished 2026-03-06T22:28:15.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:14 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 0]}]': finished 2026-03-06T22:28:15.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:14 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.14", "id": [1, 7]}]': finished 2026-03-06T22:28:15.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:14 vm06.local ceph-mon[52574]: osdmap e37: 8 total, 8 up, 8 in 2026-03-06T22:28:15.538 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-06T22:28:15.540 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm01.local 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -v /mnt/foo:/mnt/foo -- bash -c 'echo "Check with each haproxy down in turn..." 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '"'"'{print $1}'"'"'`; do 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> ceph orch daemon stop $haproxy 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> cat /mnt/foo/testfile 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> echo $haproxy > /mnt/foo/testfile 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> sync 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> ceph orch daemon start $haproxy 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> done 2026-03-06T22:28:15.540 DEBUG:teuthology.orchestra.run.vm01:> ' 2026-03-06T22:28:15.909 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:28:15.990 INFO:teuthology.orchestra.run.vm01.stdout:Check with each haproxy down in turn... 
2026-03-06T22:28:16.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:15 vm01.local ceph-mon[46942]: osdmap e38: 8 total, 8 up, 8 in 2026-03-06T22:28:16.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:15 vm06.local ceph-mon[52574]: osdmap e38: 8 total, 8 up, 8 in 2026-03-06T22:28:16.423 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to stop haproxy.nfs.foo.vm01.ghmxpc on host 'vm01' 2026-03-06T22:28:17.143 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:16 vm01.local ceph-mon[46942]: pgmap v87: 97 pgs: 1 peering, 96 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-06T22:28:17.143 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:16 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:17.143 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:16 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:17.143 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:16 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:17.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:16 vm06.local ceph-mon[52574]: pgmap v87: 97 pgs: 1 peering, 96 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-06T22:28:17.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:16 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:17.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:16 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:17.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:16 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:18.165 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:17 vm01.local ceph-mon[46942]: from='client.14672 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:18.165 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:17 vm01.local ceph-mon[46942]: from='client.14676 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.nfs.foo.vm01.ghmxpc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:18.165 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:17 vm01.local ceph-mon[46942]: Schedule stop daemon haproxy.nfs.foo.vm01.ghmxpc 2026-03-06T22:28:18.165 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:17 vm01.local ceph-mon[46942]: from='client.14680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:18.165 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:17 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:18.165 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:17 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:18.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:17 vm06.local ceph-mon[52574]: from='client.14672 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 
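The "Schedule stop daemon" records show why the loop has to poll: `ceph orch daemon stop` only queues the action with the mgr, and the state change lands asynchronously in the columnar `ceph orch ps` output (visible a few lines below as "... *:2999,9999 stopped 0s ago 29s - -"). A sketch of pulling the fields out of such a line, assuming the column layout seen in this log:

    # Sample line copied from the orch ps output further below.
    line = "haproxy.nfs.foo.vm01.ghmxpc vm01 *:2999,9999 stopped 0s ago 29s - -"
    name, host, ports, status = line.split()[:4]
    print(name)    # haproxy.nfs.foo.vm01.ghmxpc
    print(status)  # stopped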
2026-03-06T22:28:18.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:17 vm06.local ceph-mon[52574]: from='client.14676 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.nfs.foo.vm01.ghmxpc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:18.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:17 vm06.local ceph-mon[52574]: Schedule stop daemon haproxy.nfs.foo.vm01.ghmxpc 2026-03-06T22:28:18.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:17 vm06.local ceph-mon[52574]: from='client.14680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:18.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:17 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:18.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:17 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:18.979 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: pgmap v88: 97 pgs: 1 peering, 96 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:18.979 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='client.14684 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:18.979 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:18.979 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:18.979 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:18.979 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:18.979 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:18.979 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:18.979 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:18.980 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:18.980 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:18.986 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:18 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-06T22:28:19.335 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: pgmap v88: 97 pgs: 1 peering, 96 active+clean; 453 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:19.335 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='client.14684 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:19.335 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:19.335 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:19.335 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:19.335 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:19.335 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:19.335 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:19.336 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:19.336 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:19.336 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:19.336 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:18 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:20.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:19 vm01.local ceph-mon[46942]: from='client.14688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:20.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:19 vm06.local ceph-mon[52574]: from='client.14688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:20.990 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:20 vm01.local ceph-mon[46942]: pgmap v89: 97 pgs: 1 peering, 96 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s wr, 0 op/s; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:20.990 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:20 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:20.990 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:20 vm01.local 
ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:20.990 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:20 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:20.990 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:20 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:21.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:20 vm06.local ceph-mon[52574]: pgmap v89: 97 pgs: 1 peering, 96 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s wr, 0 op/s; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:21.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:20 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:21.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:20 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:21.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:20 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:21.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:20 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:21.416 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.nfs.foo.vm01.ghmxpc vm01 *:2999,9999 stopped 0s ago 29s - - 2026-03-06T22:28:22.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:21 vm01.local ceph-mon[46942]: from='client.14692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:22.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:21 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:22.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:21 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:22.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:21 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:22.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:21 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:22.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:21 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:22.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:21 vm06.local ceph-mon[52574]: from='client.14692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:22.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:21 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:22.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:21 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 
cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:22.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:21 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:22.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:21 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:22.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:21 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:23.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:22 vm01.local ceph-mon[46942]: pgmap v90: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s wr, 0 op/s; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:23.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:22 vm01.local ceph-mon[46942]: from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:23.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:22 vm06.local ceph-mon[52574]: pgmap v90: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s wr, 0 op/s; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:23.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:22 vm06.local ceph-mon[52574]: from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:24.426 INFO:teuthology.orchestra.run.vm01.stdout:test 2026-03-06T22:28:24.625 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to start haproxy.nfs.foo.vm01.ghmxpc on host 'vm01' 2026-03-06T22:28:25.010 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:25 vm01.local ceph-mon[46942]: pgmap v91: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s wr, 0 op/s; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:25.010 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:25 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:25.010 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:25 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:25.010 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:25 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:25.286 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:25 vm06.local ceph-mon[52574]: pgmap v91: 97 pgs: 97 active+clean; 460 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s wr, 0 op/s; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:25.286 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:25 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:25.286 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:25 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:25.286 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:25 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", 
"format": "json"}]: dispatch 2026-03-06T22:28:26.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:26 vm01.local ceph-mon[46942]: from='client.14700 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.nfs.foo.vm01.ghmxpc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:26.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:26 vm01.local ceph-mon[46942]: Schedule start daemon haproxy.nfs.foo.vm01.ghmxpc 2026-03-06T22:28:26.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:26 vm01.local ceph-mon[46942]: from='client.14704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:26.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:26 vm06.local ceph-mon[52574]: from='client.14700 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.nfs.foo.vm01.ghmxpc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:26.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:26 vm06.local ceph-mon[52574]: Schedule start daemon haproxy.nfs.foo.vm01.ghmxpc 2026-03-06T22:28:26.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:26 vm06.local ceph-mon[52574]: from='client.14704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:27 vm01.local ceph-mon[46942]: pgmap v92: 97 pgs: 97 active+clean; 461 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s wr, 1 op/s; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:27 vm01.local ceph-mon[46942]: from='client.14708 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:27 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:27 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:27 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:27 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:27 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:27 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:27 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:27 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:27.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 
06 22:28:27 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: pgmap v92: 97 pgs: 97 active+clean; 461 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s wr, 1 op/s; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: from='client.14708 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:27.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:27 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:28.680 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:28 vm01.local ceph-mon[46942]: from='client.24425 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:28.681 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:28 vm01.local ceph-mon[46942]: pgmap v93: 97 pgs: 97 active+clean; 461 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s wr, 0 op/s; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:28.681 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:28 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:28.681 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:28 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:28.681 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:28 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:28.681 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:28 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:28:28.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:28 vm06.local ceph-mon[52574]: from='client.24425 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:28.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:28 vm06.local ceph-mon[52574]: pgmap v93: 97 pgs: 97 active+clean; 461 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s wr, 0 op/s; 1 B/s, 0 keys/s, 0 objects/s recovering 2026-03-06T22:28:28.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:28 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:28.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:28 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:28.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:28 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:28.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:28 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:28:29.602 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.nfs.foo.vm01.ghmxpc vm01 *:2999,9999 running (2s) 0s ago 37s 3590k - 2.3.17-d1c9119 e85424b0d443 0a20c053d1f6 2026-03-06T22:28:29.769 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to stop haproxy.nfs.foo.vm06.kpjmdj on host 'vm06' 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='client.14716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 
vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:29 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='client.14716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:30.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:29 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' 
entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:31.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:30 vm06.local ceph-mon[52574]: pgmap v94: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 2.2 KiB/s wr, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-06T22:28:31.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:30 vm06.local ceph-mon[52574]: from='client.14720 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:31.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:30 vm06.local ceph-mon[52574]: from='client.14724 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.nfs.foo.vm06.kpjmdj", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:31.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:30 vm06.local ceph-mon[52574]: Schedule stop daemon haproxy.nfs.foo.vm06.kpjmdj 2026-03-06T22:28:31.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:30 vm06.local ceph-mon[52574]: from='client.24441 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:31.346 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:30 vm01.local ceph-mon[46942]: pgmap v94: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 2.2 KiB/s wr, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-06T22:28:31.346 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:30 vm01.local ceph-mon[46942]: from='client.14720 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:31.347 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:30 vm01.local ceph-mon[46942]: from='client.14724 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.nfs.foo.vm06.kpjmdj", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:31.347 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:30 vm01.local ceph-mon[46942]: Schedule stop daemon haproxy.nfs.foo.vm06.kpjmdj 2026-03-06T22:28:31.347 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:30 vm01.local ceph-mon[46942]: from='client.24441 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: from='client.14732 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: pgmap v95: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 1.2 KiB/s wr, 1 op/s 2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 
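The entries above are the test's haproxy failover check: the client issues an "orch daemon stop" for haproxy.nfs.foo.vm06.kpjmdj ("Schedule stop daemon ..."), then repeatedly dispatches "orch ps" until the daemon reports stopped. A minimal sketch of that stop-and-wait step, assuming an admin keyring on the host (the daemon name is the one from this run):

    # Stop one haproxy backend of the ingress service, then poll the
    # orchestrator until it actually reports the daemon as stopped.
    ceph orch daemon stop haproxy.nfs.foo.vm06.kpjmdj
    while ! ceph orch ps | grep haproxy.nfs.foo.vm06.kpjmdj | grep -q stopped; do
        sleep 1
    done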
2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:32.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:32 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: from='client.14732 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: pgmap v95: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 1.2 KiB/s wr, 1 op/s 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:32.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:32 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:33.439 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:33 vm01.local ceph-mon[46942]: from='client.14736 -' entity='client.admin' 
cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:33.439 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:33 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:33.439 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:33 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:33.439 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:33 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:33.581 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.nfs.foo.vm06.kpjmdj vm06 *:2999,9999 stopped 0s ago 37s - - 2026-03-06T22:28:33.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:33 vm06.local ceph-mon[52574]: from='client.14736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:33.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:33 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:33.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:33 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:33.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:33 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 vm01.local ceph-mon[46942]: pgmap v96: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.2 KiB/s wr, 1 op/s 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 vm01.local ceph-mon[46942]: from='client.14740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 
vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:34.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:34 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: pgmap v96: 97 pgs: 97 active+clean; 471 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.2 KiB/s wr, 1 op/s 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: from='client.14740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:34.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:34 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:36.649 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.nfs.foo.vm01.ghmxpc 2026-03-06T22:28:36.834 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to start haproxy.nfs.foo.vm06.kpjmdj on host 'vm06' 2026-03-06T22:28:36.834 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:36 vm01.local ceph-mon[46942]: pgmap v97: 97 pgs: 97 active+clean; 472 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.5 KiB/s wr, 1 op/s 2026-03-06T22:28:36.904 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:36 vm06.local ceph-mon[52574]: pgmap v97: 97 pgs: 97 active+clean; 472 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.5 KiB/s wr, 1 op/s 2026-03-06T22:28:38.122 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:37 vm01.local ceph-mon[46942]: from='client.14744 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": 
"haproxy.nfs.foo.vm06.kpjmdj", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:38.122 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:37 vm01.local ceph-mon[46942]: Schedule start daemon haproxy.nfs.foo.vm06.kpjmdj 2026-03-06T22:28:38.122 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:37 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:38.122 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:37 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:38.122 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:37 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:38.122 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:37 vm01.local ceph-mon[46942]: from='client.14748 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:38.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:37 vm06.local ceph-mon[52574]: from='client.14744 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.nfs.foo.vm06.kpjmdj", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:38.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:37 vm06.local ceph-mon[52574]: Schedule start daemon haproxy.nfs.foo.vm06.kpjmdj 2026-03-06T22:28:38.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:37 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:38.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:37 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:38.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:37 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:38.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:37 vm06.local ceph-mon[52574]: from='client.14748 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:38.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:38 vm01.local ceph-mon[46942]: pgmap v98: 97 pgs: 97 active+clean; 472 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.3 KiB/s wr, 1 op/s 2026-03-06T22:28:38.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:38 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:38.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:38 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:38.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:38 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:38.838 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:38 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:38.838 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:38 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' 
entity='mgr.vm01.mrlynj' 2026-03-06T22:28:38.838 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:38 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:38.838 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:38 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:38 vm06.local ceph-mon[52574]: pgmap v98: 97 pgs: 97 active+clean; 472 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.3 KiB/s wr, 1 op/s 2026-03-06T22:28:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:38 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:38 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:38 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:38 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:38 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:38 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:39.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:38 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:40.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:39 vm01.local ceph-mon[46942]: from='client.14752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:40.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:39 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:40.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:39 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:40.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:39 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:40.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:39 vm06.local ceph-mon[52574]: from='client.14752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:40.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:39 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:40.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:39 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' 
entity='mgr.vm01.mrlynj'
2026-03-06T22:28:40.155 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:39 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T22:28:40.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:40 vm06.local ceph-mon[52574]: pgmap v99: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 2.4 KiB/s wr, 1 op/s
2026-03-06T22:28:40.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:40 vm06.local ceph-mon[52574]: from='client.14756 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:28:40.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:40 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:28:40.847 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:40 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:28:40.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:40 vm01.local ceph-mon[46942]: pgmap v99: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 2.4 KiB/s wr, 1 op/s
2026-03-06T22:28:40.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:40 vm01.local ceph-mon[46942]: from='client.14756 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:28:40.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:40 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:28:40.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:40 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj'
2026-03-06T22:28:41.839 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.nfs.foo.vm06.kpjmdj vm06 *:2999,9999 running (2s) 1s ago 46s 3628k - 2.3.17-d1c9119 e85424b0d443 60dec9495194
2026-03-06T22:28:41.885 INFO:teuthology.run_tasks:Running task vip.exec...
2026-03-06T22:28:41.887 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm01.local
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'echo "Check with $(hostname) ganesha(s) down..."
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> for c in `systemctl | grep ceph- | grep @nfs | awk '"'"'{print $1}'"'"'`; do
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> cid=`echo $c | sed '"'"'s/@/-/'"'"'`
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> id=`echo $c | cut -d @ -f 2 | sed '"'"'s/.service$//'"'"'`
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-`
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> echo "Removing daemon $id fsid $fsid..."
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:>
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> echo "Waking up cephadm..."
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:>
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> while ! timeout 1 cat /mnt/foo/testfile ; do true ; done
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> echo "Mount is back!"
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> done
2026-03-06T22:28:41.888 DEBUG:teuthology.orchestra.run.vm01:> '
2026-03-06T22:28:41.913 INFO:teuthology.orchestra.run.vm01.stderr:++ hostname
2026-03-06T22:28:41.914 INFO:teuthology.orchestra.run.vm01.stdout:Check with vm01.local ganesha(s) down...
2026-03-06T22:28:41.914 INFO:teuthology.orchestra.run.vm01.stderr:+ echo 'Check with vm01.local ganesha(s) down...'
2026-03-06T22:28:41.914 INFO:teuthology.orchestra.run.vm01.stderr:++ systemctl
2026-03-06T22:28:41.914 INFO:teuthology.orchestra.run.vm01.stderr:++ awk '{print $1}'
2026-03-06T22:28:41.916 INFO:teuthology.orchestra.run.vm01.stderr:++ grep ceph-
2026-03-06T22:28:41.919 INFO:teuthology.orchestra.run.vm01.stderr:++ grep @nfs
2026-03-06T22:28:41.920 INFO:teuthology.orchestra.run.vm01.stderr:+ for c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`
2026-03-06T22:28:41.921 INFO:teuthology.orchestra.run.vm01.stderr:++ echo ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@nfs.foo.0.0.vm01.mkzqsn.service
2026-03-06T22:28:41.921 INFO:teuthology.orchestra.run.vm01.stderr:++ sed s/@/-/
2026-03-06T22:28:41.922 INFO:teuthology.orchestra.run.vm01.stderr:+ cid=ceph-c76e688a-19a2-11f1-bdea-01160fc6f239-nfs.foo.0.0.vm01.mkzqsn.service
2026-03-06T22:28:41.922 INFO:teuthology.orchestra.run.vm01.stderr:++ echo ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@nfs.foo.0.0.vm01.mkzqsn.service
2026-03-06T22:28:41.922 INFO:teuthology.orchestra.run.vm01.stderr:++ cut -d @ -f 2
2026-03-06T22:28:41.922 INFO:teuthology.orchestra.run.vm01.stderr:++ sed 's/.service$//'
2026-03-06T22:28:41.923 INFO:teuthology.orchestra.run.vm01.stderr:+ id=nfs.foo.0.0.vm01.mkzqsn
2026-03-06T22:28:41.923 INFO:teuthology.orchestra.run.vm01.stderr:++ echo ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@nfs.foo.0.0.vm01.mkzqsn.service
2026-03-06T22:28:41.923 INFO:teuthology.orchestra.run.vm01.stderr:++ cut -d - -f 2-
2026-03-06T22:28:41.923 INFO:teuthology.orchestra.run.vm01.stderr:++ cut -d @ -f 1
2026-03-06T22:28:41.924 INFO:teuthology.orchestra.run.vm01.stderr:+ fsid=c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:28:41.924 INFO:teuthology.orchestra.run.vm01.stdout:Removing daemon nfs.foo.0.0.vm01.mkzqsn fsid c76e688a-19a2-11f1-bdea-01160fc6f239...
2026-03-06T22:28:41.924 INFO:teuthology.orchestra.run.vm01.stderr:+ echo 'Removing daemon nfs.foo.0.0.vm01.mkzqsn fsid c76e688a-19a2-11f1-bdea-01160fc6f239...'
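Unwound from teuthology's '"'"' re-quoting in the DEBUG lines above, the script being executed is the following; this is a transcription for readability of exactly what the log shows, not an additional log entry ($TESTDIR is /home/ubuntu/cephtest in this run):

    echo "Check with $(hostname) ganesha(s) down..."
    for c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`; do
        cid=`echo $c | sed 's/@/-/'`
        id=`echo $c | cut -d @ -f 2 | sed 's/.service$//'`
        fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-`
        echo "Removing daemon $id fsid $fsid..."
        sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id

        echo "Waking up cephadm..."
        sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh

        while ! timeout 1 cat /mnt/foo/testfile ; do true ; done
        echo "Mount is back!"
    done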
2026-03-06T22:28:41.924 INFO:teuthology.orchestra.run.vm01.stderr:+ sudo /home/ubuntu/cephtest/cephadm rm-daemon --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 --name nfs.foo.0.0.vm01.mkzqsn 2026-03-06T22:28:42.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:41 vm01.local ceph-mon[46942]: from='client.14760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:42.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:41 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:42.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:41 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:42.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:41 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:42.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:41 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:42.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:41 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:42.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:41 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:42.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:41 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:42.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:41 vm06.local ceph-mon[52574]: from='client.14760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:42.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:41 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:42.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:41 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:42.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:41 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:42.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:41 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:42.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:41 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:42.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:41 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:42.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:41 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' 
entity='mgr.vm01.mrlynj'
2026-03-06T22:28:42.747 INFO:teuthology.orchestra.run.vm01.stdout:Waking up cephadm...
2026-03-06T22:28:42.748 INFO:teuthology.orchestra.run.vm01.stderr:+ echo 'Waking up cephadm...'
2026-03-06T22:28:42.748 INFO:teuthology.orchestra.run.vm01.stderr:+ sudo /home/ubuntu/cephtest/cephadm shell -- ceph orch ps --refresh
2026-03-06T22:28:43.061 INFO:teuthology.orchestra.run.vm01.stderr:Inferring fsid c76e688a-19a2-11f1-bdea-01160fc6f239
2026-03-06T22:28:43.114 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config
2026-03-06T22:28:43.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:42 vm01.local ceph-mon[46942]: pgmap v100: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 1.3 KiB/s wr, 1 op/s
2026-03-06T22:28:43.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:42 vm01.local ceph-mon[46942]: from='client.14764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:28:43.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:42 vm06.local ceph-mon[52574]: pgmap v100: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 1.3 KiB/s wr, 1 op/s
2026-03-06T22:28:43.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:42 vm06.local ceph-mon[52574]: from='client.14764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T22:28:43.215 INFO:teuthology.orchestra.run.vm01.stderr:Using ceph image with id '8bccc98d839a' and tag 'cobaltcore-storage-v19.2.3-fasttrack-5' created on 2026-03-06 14:41:18 +0000 UTC
2026-03-06T22:28:43.215 INFO:teuthology.orchestra.run.vm01.stderr:harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.vm01 vm01 *:9093,9094 running (2m) 1s ago 2m 22.6M - 0.25.0 c8568f914cd2 4365d374f776
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter.vm01 vm01 running (3m) 1s ago 3m 9294k - 19.2.3-39-g340d3c24fc6 8bccc98d839a c48ec14a483b
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter.vm06 vm06 running (2m) 2s ago 2m 6865k - 19.2.3-39-g340d3c24fc6 8bccc98d839a 72acd665fd43
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:crash.vm01 vm01 running (3m) 1s ago 3m 11.2M - 19.2.3-39-g340d3c24fc6 8bccc98d839a c2e845502308
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:crash.vm06 vm06 running (2m) 2s ago 2m 11.1M - 19.2.3-39-g340d3c24fc6 8bccc98d839a d5f5cd20661b
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:grafana.vm01 vm01 *:3000 running (2m) 1s ago 2m 86.1M - 10.4.0 c8b91775d855 193f68923397
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.nfs.foo.vm01.ghmxpc vm01 *:2999,9999 running (15s) 1s ago 51s 3728k - 2.3.17-d1c9119 e85424b0d443 0a20c053d1f6
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.nfs.foo.vm06.kpjmdj vm06 *:2999,9999 running (4s) 2s ago 47s 3628k - 2.3.17-d1c9119 e85424b0d443 60dec9495194
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:keepalived.nfs.foo.vm01.nicgzx vm01 running (38s) 1s ago 38s 2377k - 2.2.4 4a3a1ff181d9 b3a828c3e20f
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:keepalived.nfs.foo.vm06.vgfefr vm06 running (43s) 2s ago 43s 2395k - 2.2.4 4a3a1ff181d9 3e50f326dd2c
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:mds.foofs.vm01.aitcjt vm01 running (58s) 1s ago 58s 16.0M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 2ae861a54c2b
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:mds.foofs.vm06.aeobze vm06 running (59s) 2s ago 59s 16.7M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 364ed570a03d
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:mgr.vm01.mrlynj vm01 *:9283,8765,8443 running (3m) 1s ago 3m 567M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 73059a4c7a15
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:mgr.vm06.awlziz vm06 *:8443,9283,8765 running (2m) 2s ago 2m 477M - 19.2.3-39-g340d3c24fc6 8bccc98d839a d02607312b6f
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:mon.vm01 vm01 running (4m) 1s ago 4m 51.2M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a 7fb7e5e913b1
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:mon.vm06 vm06 running (2m) 2s ago 2m 40.5M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a d717d212d7fb
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:nfs.foo.0.0.vm01.mkzqsn vm01 *:12999 running (55s) 1s ago 55s 54.9M - 5.9 8bccc98d839a 26a672b22e12
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.vm01 vm01 *:9100 running (2m) 1s ago 2m 9550k - 1.7.0 72c9c2088986 ec0df0927f80
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.vm06 vm06 *:9100 running (2m) 2s ago 2m 10.4M - 1.7.0 72c9c2088986 b6c10aca64f3
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm06 running (100s) 2s ago 100s 41.2M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 213b5980df4e
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (99s) 1s ago 99s 67.5M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a cbe973f313a3
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (95s) 1s ago 95s 44.2M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a e510991af211
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm06 running (96s) 2s ago 96s 64.4M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a a0c06f5a2620
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (93s) 2s ago 93s 44.2M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a ab91fb94fa67
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm01 running (92s) 1s ago 92s 45.1M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a ef271b290e75
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (90s) 2s ago 89s 67.2M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a bcb18e202ffa
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm01 running (88s) 1s ago 88s 65.2M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 37a8ab69a547
2026-03-06T22:28:43.550 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.vm01 vm01 *:9095 running (35s) 1s ago 2m 38.0M - 2.51.0 1d3b7f56885b 72613595ac06
2026-03-06T22:28:43.606 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile
2026-03-06T22:28:44.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:28:44.121
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:43 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:44.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:28:44.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:43 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:44.609 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:44.609 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:44.868 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:44 vm01.local ceph-mon[46942]: pgmap v101: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.3 KiB/s wr, 1 op/s 2026-03-06T22:28:44.868 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:44 vm01.local ceph-mon[46942]: from='client.14768 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:44.868 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:44.868 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:44 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:44.868 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:44 vm06.local ceph-mon[52574]: pgmap v101: 97 pgs: 97 active+clean; 482 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.3 KiB/s wr, 1 op/s 2026-03-06T22:28:44.868 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:44 vm06.local ceph-mon[52574]: from='client.14768 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:28:44.868 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:45.154 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:44 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:45.610 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:45.610 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: 
dispatch 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: Fencing old nfs.foo.0.0.vm01.mkzqsn 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth rm", "entity": "client.nfs.foo.0.0.vm01.mkzqsn"}]: dispatch 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth rm", "entity": "client.nfs.foo.0.0.vm01.mkzqsn"}]': finished 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: Creating key for client.nfs.foo.0.1.vm01.qlmyqs 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm01.qlmyqs", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm01.qlmyqs", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: Ensuring nfs.foo.0 is in the ganesha grace table 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 
192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: Rados config object exists: conf-nfs.foo 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: Creating key for client.nfs.foo.0.1.vm01.qlmyqs-rgw 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm01.qlmyqs-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm01.qlmyqs-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: Bind address in nfs.foo.0.1.vm01.qlmyqs's ganesha conf is defaulting to empty 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: Deploying daemon nfs.foo.0.1.vm01.qlmyqs on vm01 2026-03-06T22:28:46.147 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:46 vm01.local ceph-mon[46942]: pgmap v102: 97 pgs: 97 active+clean; 483 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.5 KiB/s wr, 1 op/s 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:46.404 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: Fencing old nfs.foo.0.0.vm01.mkzqsn 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth rm", "entity": "client.nfs.foo.0.0.vm01.mkzqsn"}]: dispatch 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth rm", "entity": "client.nfs.foo.0.0.vm01.mkzqsn"}]': finished 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: Creating key for client.nfs.foo.0.1.vm01.qlmyqs 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm01.qlmyqs", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-06T22:28:46.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm01.qlmyqs", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: Ensuring nfs.foo.0 is in the ganesha grace table 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local 
ceph-mon[52574]: Rados config object exists: conf-nfs.foo 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: Creating key for client.nfs.foo.0.1.vm01.qlmyqs-rgw 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm01.qlmyqs-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.1.vm01.qlmyqs-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: Bind address in nfs.foo.0.1.vm01.qlmyqs's ganesha conf is defaulting to empty 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: Deploying daemon nfs.foo.0.1.vm01.qlmyqs on vm01 2026-03-06T22:28:46.405 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:46 vm06.local ceph-mon[52574]: pgmap v102: 97 pgs: 97 active+clean; 483 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 1.5 KiB/s wr, 1 op/s 2026-03-06T22:28:46.611 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:46.611 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:47.281 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:47.282 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:47.282 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:47.282 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:47.282 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:47 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:47.282 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:47 vm01.local ceph-mon[46942]: Evicting (and blocklisting) client session 24375 (192.168.123.101:0/4126802526) 2026-03-06T22:28:47.282 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:47 vm01.local ceph-mon[46942]: from='mds.? [v2:192.168.123.106:6832/122259778,v1:192.168.123.106:6833/122259778]' entity='mds.foofs.vm06.aeobze' cmd=[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.101:0/4126802526"}]: dispatch 2026-03-06T22:28:47.282 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:47 vm01.local ceph-mon[46942]: from='mds.? 
' entity='mds.foofs.vm06.aeobze' cmd=[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.101:0/4126802526"}]: dispatch 2026-03-06T22:28:47.614 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:47.614 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:47.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:47.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:47.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:47.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:47.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:47 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:47.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:47 vm06.local ceph-mon[52574]: Evicting (and blocklisting) client session 24375 (192.168.123.101:0/4126802526) 2026-03-06T22:28:47.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:47 vm06.local ceph-mon[52574]: from='mds.? [v2:192.168.123.106:6832/122259778,v1:192.168.123.106:6833/122259778]' entity='mds.foofs.vm06.aeobze' cmd=[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.101:0/4126802526"}]: dispatch 2026-03-06T22:28:47.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:47 vm06.local ceph-mon[52574]: from='mds.? ' entity='mds.foofs.vm06.aeobze' cmd=[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.101:0/4126802526"}]: dispatch 2026-03-06T22:28:48.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:48 vm01.local ceph-mon[46942]: pgmap v103: 97 pgs: 97 active+clean; 483 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 1.4 KiB/s wr, 1 op/s 2026-03-06T22:28:48.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:48 vm01.local ceph-mon[46942]: from='mds.? ' entity='mds.foofs.vm06.aeobze' cmd='[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.101:0/4126802526"}]': finished 2026-03-06T22:28:48.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:48 vm01.local ceph-mon[46942]: osdmap e39: 8 total, 8 up, 8 in 2026-03-06T22:28:48.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:48 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:48.615 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:48.615 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:48.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:48 vm06.local ceph-mon[52574]: pgmap v103: 97 pgs: 97 active+clean; 483 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 1.4 KiB/s wr, 1 op/s 2026-03-06T22:28:48.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:48 vm06.local ceph-mon[52574]: from='mds.? 
' entity='mds.foofs.vm06.aeobze' cmd='[{"prefix":"osd blocklist", "blocklistop":"add","addr":"192.168.123.101:0/4126802526"}]': finished 2026-03-06T22:28:48.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:48 vm06.local ceph-mon[52574]: osdmap e39: 8 total, 8 up, 8 in 2026-03-06T22:28:48.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:48 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.616 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:49.616 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: Reconfiguring haproxy.nfs.foo.vm01.ghmxpc (dependencies changed)... 
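The repeating '+ true' / '+ timeout 1 cat /mnt/foo/testfile' pairs in the vm01 stderr above are the recovery probe from the test script: each read of the testfile is capped at one second so a hung NFS mount cannot stall the loop, and the loop spins until the mount answers again. Isolated, the pattern looks like this (a minimal sketch; the test busy-loops with 'true' where this version sleeps):

    # Probe a possibly-hung NFS mount until it recovers; timeout(1) bounds
    # each attempt so a dead server cannot block the loop forever.
    while ! timeout 1 cat /mnt/foo/testfile; do
        sleep 1    # the test uses a busy loop ('true') instead of sleeping
    done
    echo "Mount is back!"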
2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: Reconfiguring daemon haproxy.nfs.foo.vm01.ghmxpc on vm01 2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.720 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:49 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: Reconfiguring haproxy.nfs.foo.vm01.ghmxpc (dependencies changed)... 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: Reconfiguring daemon haproxy.nfs.foo.vm01.ghmxpc on vm01 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:49.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:49 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:50.618 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:50.618 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:51.010 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:51 vm01.local ceph-mon[46942]: Reconfiguring haproxy.nfs.foo.vm06.kpjmdj (dependencies changed)... 
2026-03-06T22:28:51.010 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:51 vm01.local ceph-mon[46942]: Reconfiguring daemon haproxy.nfs.foo.vm06.kpjmdj on vm06 2026-03-06T22:28:51.010 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:51 vm01.local ceph-mon[46942]: pgmap v105: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1023 B/s wr, 2 op/s 2026-03-06T22:28:51.010 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:51 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:51.010 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:51 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:51.010 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:51 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:51.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:51 vm06.local ceph-mon[52574]: Reconfiguring haproxy.nfs.foo.vm06.kpjmdj (dependencies changed)... 2026-03-06T22:28:51.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:51 vm06.local ceph-mon[52574]: Reconfiguring daemon haproxy.nfs.foo.vm06.kpjmdj on vm06 2026-03-06T22:28:51.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:51 vm06.local ceph-mon[52574]: pgmap v105: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1023 B/s wr, 2 op/s 2026-03-06T22:28:51.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:51 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:51.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:51 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:51.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:51 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T22:28:51.619 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:51.619 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:52.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:52 vm01.local ceph-mon[46942]: pgmap v106: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1.2 KiB/s wr, 3 op/s 2026-03-06T22:28:52.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:52 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:52.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:52 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:52.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:52 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:52.622 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:52.622 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:52.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:52 vm06.local ceph-mon[52574]: pgmap v106: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1.2 KiB/s wr, 3 op/s 
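The 'Reconfiguring haproxy.nfs.foo.* (dependencies changed)' entries on both mons show cephadm reacting to the earlier daemon removal: redeploying the ganesha backend changed the dependency set of the ingress.nfs.foo service, so both haproxy instances get a freshly generated configuration pushed. One way to watch this converge from the admin node (a sketch using standard orchestrator CLI; the service names are the ones from this run):

    # Daemon view: the haproxy STATUS ages reset once they are reconfigured
    ceph orch ps --service-name ingress.nfs.foo
    # Spec view: the ingress definition cephadm is converging toward
    ceph orch ls ingress --export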
2026-03-06T22:28:52.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:52 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:52.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:52 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:52.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:52 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:53.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:53 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:53.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:53 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:53.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:53 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:53.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:53 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:53.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:53 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:53.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:53 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:53.623 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:53.623 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:53 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:53 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T22:28:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:53 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T22:28:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:53 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:53 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T22:28:53.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:53 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' 2026-03-06T22:28:54.625 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:54.625 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:54.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:54 
vm06.local ceph-mon[52574]: pgmap v107: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1.2 KiB/s wr, 3 op/s 2026-03-06T22:28:54.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:54 vm01.local ceph-mon[46942]: pgmap v107: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1.2 KiB/s wr, 3 op/s 2026-03-06T22:28:55.626 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:55.626 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:56.627 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:56.627 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:57.121 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:56 vm01.local ceph-mon[46942]: pgmap v108: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1023 B/s wr, 2 op/s 2026-03-06T22:28:57.404 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:56 vm06.local ceph-mon[52574]: pgmap v108: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1023 B/s wr, 2 op/s 2026-03-06T22:28:57.628 INFO:teuthology.orchestra.run.vm01.stderr:+ true 2026-03-06T22:28:57.628 INFO:teuthology.orchestra.run.vm01.stderr:+ timeout 1 cat /mnt/foo/testfile 2026-03-06T22:28:57.633 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.nfs.foo.vm06.kpjmdj 2026-03-06T22:28:57.633 INFO:teuthology.orchestra.run.vm01.stdout:Mount is back! 2026-03-06T22:28:57.633 INFO:teuthology.orchestra.run.vm01.stderr:+ echo 'Mount is back!' 2026-03-06T22:28:57.635 INFO:tasks.vip:Running commands on role host.b host ubuntu@vm06.local 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'echo "Check with $(hostname) ganesha(s) down..." 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> for c in `systemctl | grep ceph- | grep @nfs | awk '"'"'{print $1}'"'"'`; do 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> cid=`echo $c | sed '"'"'s/@/-/'"'"'` 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> id=`echo $c | cut -d @ -f 2 | sed '"'"'s/.service$//'"'"'` 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-` 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> echo "Removing daemon $id fsid $fsid..." 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> echo "Waking up cephadm..." 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> while ! timeout 1 cat /mnt/foo/testfile ; do true ; done 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> echo "Mount is back!" 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> done 2026-03-06T22:28:57.635 DEBUG:teuthology.orchestra.run.vm06:> ' 2026-03-06T22:28:57.661 INFO:teuthology.orchestra.run.vm06.stderr:++ hostname 2026-03-06T22:28:57.661 INFO:teuthology.orchestra.run.vm06.stdout:Check with vm06.local ganesha(s) down... 2026-03-06T22:28:57.661 INFO:teuthology.orchestra.run.vm06.stderr:+ echo 'Check with vm06.local ganesha(s) down...' 
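The script just dispatched to vm06 is the same ganesha-kill loop that ran on vm01: it lists systemd units matching 'ceph-*@nfs.*', splits each unit name into the cluster fsid (before the '@', minus the 'ceph-' prefix) and the daemon id (after the '@', minus '.service'), and removes that daemon behind the orchestrator's back with 'cephadm rm-daemon'. The name surgery in isolation, using a unit name from this run:

    c=ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@nfs.foo.0.1.vm01.qlmyqs.service
    id=$(echo "$c" | cut -d @ -f 2 | sed 's/\.service$//')   # nfs.foo.0.1.vm01.qlmyqs
    fsid=$(echo "$c" | cut -d @ -f 1 | cut -d - -f 2-)       # the cluster fsid
    sudo "$TESTDIR/cephadm" rm-daemon --fsid "$fsid" --name "$id"

On vm06 the systemctl pipeline below finds no matching @nfs units, since the only ganesha for nfs.foo (nfs.foo.0.1.vm01.qlmyqs) was placed on vm01, so the loop body never runs and the task falls through immediately.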
2026-03-06T22:28:57.661 INFO:teuthology.orchestra.run.vm06.stderr:++ systemctl 2026-03-06T22:28:57.662 INFO:teuthology.orchestra.run.vm06.stderr:++ awk '{print $1}' 2026-03-06T22:28:57.663 INFO:teuthology.orchestra.run.vm06.stderr:++ grep ceph- 2026-03-06T22:28:57.666 INFO:teuthology.orchestra.run.vm06.stderr:++ grep @nfs 2026-03-06T22:28:57.669 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-06T22:28:57.672 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm01.local 2026-03-06T22:28:57.672 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'stat -c '"'"'%u %g'"'"' /var/log/ceph | grep '"'"'167 167'"'"'' 2026-03-06T22:28:58.019 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:28:58.338 INFO:teuthology.orchestra.run.vm01.stdout:167 167 2026-03-06T22:28:58.377 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch status' 2026-03-06T22:28:58.398 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:58 vm01.local ceph-mon[46942]: pgmap v109: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 819 B/s wr, 2 op/s 2026-03-06T22:28:58.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:58 vm06.local ceph-mon[52574]: pgmap v109: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 819 B/s wr, 2 op/s 2026-03-06T22:28:58.714 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:28:59.053 INFO:teuthology.orchestra.run.vm01.stdout:Backend: cephadm 2026-03-06T22:28:59.053 INFO:teuthology.orchestra.run.vm01.stdout:Available: Yes 2026-03-06T22:28:59.053 INFO:teuthology.orchestra.run.vm01.stdout:Paused: No 2026-03-06T22:28:59.129 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch ps' 2026-03-06T22:28:59.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:28:59 vm01.local ceph-mon[46942]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:28:59.503 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:28:59.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:28:59 vm06.local ceph-mon[52574]: from='mgr.14221 192.168.123.101:0/2159263368' entity='mgr.vm01.mrlynj' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.vm01 vm01 *:9093,9094 running (2m) 7s ago 3m 22.6M - 0.25.0 c8568f914cd2 4365d374f776 
2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter.vm01 vm01 running (3m) 7s ago 3m 9311k - 19.2.3-39-g340d3c24fc6 8bccc98d839a c48ec14a483b 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter.vm06 vm06 running (2m) 8s ago 2m 6865k - 19.2.3-39-g340d3c24fc6 8bccc98d839a 72acd665fd43 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:crash.vm01 vm01 running (3m) 7s ago 3m 11.2M - 19.2.3-39-g340d3c24fc6 8bccc98d839a c2e845502308 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:crash.vm06 vm06 running (2m) 8s ago 2m 11.1M - 19.2.3-39-g340d3c24fc6 8bccc98d839a d5f5cd20661b 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:grafana.vm01 vm01 *:3000 running (2m) 7s ago 3m 86.1M - 10.4.0 c8b91775d855 193f68923397 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.nfs.foo.vm01.ghmxpc vm01 *:2999,9999 running (10s) 7s ago 67s 3649k - 2.3.17-d1c9119 e85424b0d443 0464889ef851 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.nfs.foo.vm06.kpjmdj vm06 *:2999,9999 running (9s) 8s ago 64s 3657k - 2.3.17-d1c9119 e85424b0d443 cfa91bf1cc84 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:keepalived.nfs.foo.vm01.nicgzx vm01 running (54s) 7s ago 54s 2373k - 2.2.4 4a3a1ff181d9 b3a828c3e20f 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:keepalived.nfs.foo.vm06.vgfefr vm06 running (59s) 8s ago 59s 2391k - 2.2.4 4a3a1ff181d9 3e50f326dd2c 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:mds.foofs.vm01.aitcjt vm01 running (75s) 7s ago 74s 16.1M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 2ae861a54c2b 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:mds.foofs.vm06.aeobze vm06 running (76s) 8s ago 76s 16.9M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 364ed570a03d 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:mgr.vm01.mrlynj vm01 *:9283,8765,8443 running (4m) 7s ago 4m 568M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 73059a4c7a15 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:mgr.vm06.awlziz vm06 *:8443,9283,8765 running (2m) 8s ago 2m 477M - 19.2.3-39-g340d3c24fc6 8bccc98d839a d02607312b6f 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:mon.vm01 vm01 running (4m) 7s ago 4m 50.9M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a 7fb7e5e913b1 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:mon.vm06 vm06 running (2m) 8s ago 2m 41.5M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a d717d212d7fb 2026-03-06T22:28:59.840 INFO:teuthology.orchestra.run.vm01.stdout:nfs.foo.0.1.vm01.qlmyqs vm01 *:12999 running (13s) 7s ago 13s 52.9M - 5.9 8bccc98d839a 002a1903435e 2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.vm01 vm01 *:9100 running (3m) 7s ago 3m 9563k - 1.7.0 72c9c2088986 ec0df0927f80 2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.vm06 vm06 *:9100 running (2m) 8s ago 2m 10.4M - 1.7.0 72c9c2088986 b6c10aca64f3 2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm06 running (116s) 8s ago 116s 41.5M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 213b5980df4e 2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (115s) 7s ago 115s 67.6M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a cbe973f313a3 2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm01 running (112s) 7s ago 112s 44.4M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a e510991af211 
2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm06 running (113s) 8s ago 113s 64.7M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a a0c06f5a2620 2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm06 running (109s) 8s ago 109s 44.4M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a ab91fb94fa67 2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm01 running (108s) 7s ago 108s 45.4M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a ef271b290e75 2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm06 running (106s) 8s ago 106s 67.4M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a bcb18e202ffa 2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm01 running (104s) 7s ago 104s 65.4M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 37a8ab69a547 2026-03-06T22:28:59.841 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.vm01 vm01 *:9095 running (51s) 7s ago 2m 38.4M - 2.51.0 1d3b7f56885b 72613595ac06 2026-03-06T22:28:59.912 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch ls' 2026-03-06T22:29:00.240 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:29:00.559 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:00 vm01.local ceph-mon[46942]: from='client.14792 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:00.559 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:00 vm01.local ceph-mon[46942]: pgmap v110: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 767 B/s wr, 2 op/s 2026-03-06T22:29:00.559 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:00 vm01.local ceph-mon[46942]: from='client.14796 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager ?:9093,9094 1/1 8s ago 3m count:1 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter 2/2 9s ago 3m * 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:crash 2/2 9s ago 3m * 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:grafana ?:3000 1/1 8s ago 3m count:1 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:ingress.nfs.foo 12.12.1.101:2999,9999 4/4 9s ago 75s count:2 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:mds.foofs 2/2 9s ago 78s count:2 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:mgr 2/2 9s ago 3m count:2 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:mon 2/2 9s ago 3m vm01:192.168.123.101=vm01;vm06:192.168.123.106=vm06;count:2 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:nfs.foo ?:12999 1/1 8s ago 76s count:1 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter ?:9100 2/2 9s ago 3m * 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:osd.all-available-devices 8 9s ago 2m * 2026-03-06T22:29:00.559 INFO:teuthology.orchestra.run.vm01.stdout:prometheus ?:9095 1/1 8s ago 3m count:1 2026-03-06T22:29:00.624 
DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch host ls' 2026-03-06T22:29:00.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:00 vm06.local ceph-mon[52574]: from='client.14792 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:00.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:00 vm06.local ceph-mon[52574]: pgmap v110: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 767 B/s wr, 2 op/s 2026-03-06T22:29:00.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:00 vm06.local ceph-mon[52574]: from='client.14796 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:00.939 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:29:01.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:01 vm01.local ceph-mon[46942]: from='client.14800 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:01.276 INFO:teuthology.orchestra.run.vm01.stdout:HOST ADDR LABELS STATUS 2026-03-06T22:29:01.276 INFO:teuthology.orchestra.run.vm01.stdout:vm01 192.168.123.101 2026-03-06T22:29:01.276 INFO:teuthology.orchestra.run.vm01.stdout:vm06 192.168.123.106 2026-03-06T22:29:01.276 INFO:teuthology.orchestra.run.vm01.stdout:2 hosts in cluster 2026-03-06T22:29:01.330 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch device ls' 2026-03-06T22:29:01.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:01 vm06.local ceph-mon[52574]: from='client.14800 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:01.678 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 79s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdb hdd DWNBRSTVMM01001 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdc hdd DWNBRSTVMM01002 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdd hdd DWNBRSTVMM01003 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vde hdd DWNBRSTVMM01004 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 
79s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vdb hdd DWNBRSTVMM06001 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vdc hdd DWNBRSTVMM06002 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vdd hdd DWNBRSTVMM06003 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:29:02.010 INFO:teuthology.orchestra.run.vm01.stdout:vm06 /dev/vde hdd DWNBRSTVMM06004 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T22:29:02.078 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 -- bash -c 'ceph orch ls | grep '"'"'^osd.all-available-devices '"'"'' 2026-03-06T22:29:02.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:02 vm01.local ceph-mon[46942]: pgmap v111: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 511 B/s wr, 1 op/s 2026-03-06T22:29:02.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:02 vm01.local ceph-mon[46942]: from='client.14804 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:02.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:02 vm01.local ceph-mon[46942]: from='client.14808 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:02.404 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/mon.vm01/config 2026-03-06T22:29:02.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:02 vm06.local ceph-mon[52574]: pgmap v111: 97 pgs: 97 active+clean; 484 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 511 B/s wr, 1 op/s 2026-03-06T22:29:02.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:02 vm06.local ceph-mon[52574]: from='client.14804 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:02.654 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:02 vm06.local ceph-mon[52574]: from='client.14808 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:02.741 INFO:teuthology.orchestra.run.vm01.stdout:osd.all-available-devices 8 11s ago 2m * 2026-03-06T22:29:02.812 DEBUG:teuthology.run_tasks:Unwinding manager vip 2026-03-06T22:29:02.815 INFO:tasks.vip:Removing 12.12.0.101 (and any VIPs) on vm01.local iface eth0... 2026-03-06T22:29:02.815 DEBUG:teuthology.orchestra.run.vm01:> sudo ip addr del 12.12.0.101/22 dev eth0 2026-03-06T22:29:02.839 DEBUG:teuthology.orchestra.run.vm01:> sudo ip addr del 12.12.1.101/22 dev eth0 2026-03-06T22:29:02.906 INFO:tasks.vip:Removing 12.12.0.106 (and any VIPs) on vm06.local iface eth0... 
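Teardown of the vip task now removes both addresses it had configured on eth0 of each host: the per-host test address (12.12.0.x/22) and the floating ingress VIP (12.12.1.101/22). Only the host keepalived last favored actually holds the VIP, so the delete succeeds on vm01 but fails on vm06 just below with 'Address not found' (exit code 2, which the teardown tolerates). An idempotent variant of that cleanup (the '|| true' is an addition here, not what the task runs):

    # Drop the VIP if present; ignore the error on hosts that never held it.
    sudo ip addr del 12.12.1.101/22 dev eth0 2>/dev/null || true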
2026-03-06T22:29:02.906 DEBUG:teuthology.orchestra.run.vm06:> sudo ip addr del 12.12.0.106/22 dev eth0 2026-03-06T22:29:02.934 DEBUG:teuthology.orchestra.run.vm06:> sudo ip addr del 12.12.1.101/22 dev eth0 2026-03-06T22:29:02.999 INFO:teuthology.orchestra.run.vm06.stderr:Error: ipv4: Address not found. 2026-03-06T22:29:03.000 DEBUG:teuthology.orchestra.run:got remote process result: 2 2026-03-06T22:29:03.000 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-03-06T22:29:03.003 INFO:tasks.cephadm:Teardown begin 2026-03-06T22:29:03.003 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-06T22:29:03.033 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-06T22:29:03.066 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-06T22:29:03.067 DEBUG:teuthology.orchestra.run.vm01:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-06T22:29:03.095 DEBUG:teuthology.orchestra.run.vm06:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-06T22:29:03.124 INFO:tasks.cephadm:Stopping all daemons... 2026-03-06T22:29:03.124 INFO:tasks.cephadm.mon.vm01:Stopping mon.vm01... 2026-03-06T22:29:03.124 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm01 2026-03-06T22:29:03.222 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:03 vm06.local ceph-mon[52574]: from='client.14812 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:03.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:03 vm01.local systemd[1]: Stopping Ceph mon.vm01 for c76e688a-19a2-11f1-bdea-01160fc6f239... 2026-03-06T22:29:03.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:03 vm01.local ceph-mon[46942]: from='client.14812 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T22:29:03.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:03 vm01.local ceph-c76e688a-19a2-11f1-bdea-01160fc6f239-mon-vm01[46938]: 2026-03-06T21:29:03.237+0000 7f59f5732640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm01 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-06T22:29:03.371 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Mar 06 22:29:03 vm01.local ceph-c76e688a-19a2-11f1-bdea-01160fc6f239-mon-vm01[46938]: 2026-03-06T21:29:03.237+0000 7f59f5732640 -1 mon.vm01@0(leader) e2 *** Got Signal Terminated *** 2026-03-06T22:29:03.552 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm01.service' 2026-03-06T22:29:03.589 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-06T22:29:03.589 INFO:tasks.cephadm.mon.vm01:Stopped mon.vm01 2026-03-06T22:29:03.589 INFO:tasks.cephadm.mon.vm06:Stopping mon.vm06... 2026-03-06T22:29:03.589 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm06 2026-03-06T22:29:03.838 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:03 vm06.local systemd[1]: Stopping Ceph mon.vm06 for c76e688a-19a2-11f1-bdea-01160fc6f239... 
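Every cephadm-managed daemon runs as a templated systemd unit named 'ceph-<fsid>@<daemon-name>', so stopping a mon is a plain systemctl stop followed by killing the 'journalctl -f' follower that teuthology had attached for log capture (the source of all the journalctl@ceph.mon.* lines in this log). The vm01 sequence above, condensed:

    fsid=c76e688a-19a2-11f1-bdea-01160fc6f239
    sudo systemctl stop "ceph-${fsid}@mon.vm01"
    # detach the per-unit log follower once the daemon is down
    sudo pkill -f "journalctl -f -n 0 -u ceph-${fsid}@mon.vm01.service"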
2026-03-06T22:29:03.838 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:03 vm06.local ceph-c76e688a-19a2-11f1-bdea-01160fc6f239-mon-vm06[52570]: 2026-03-06T21:29:03.693+0000 7fa44b987640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm06 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-06T22:29:03.838 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:03 vm06.local ceph-c76e688a-19a2-11f1-bdea-01160fc6f239-mon-vm06[52570]: 2026-03-06T21:29:03.693+0000 7fa44b987640 -1 mon.vm06@1(peon) e2 *** Got Signal Terminated *** 2026-03-06T22:29:03.838 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:03 vm06.local podman[78368]: 2026-03-06 22:29:03.764641227 +0100 CET m=+0.085041183 container died d717d212d7fb4c392fede99894a4e2a8cfe5647fff71bd5fb51eaeb6f74b09a4 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-c76e688a-19a2-11f1-bdea-01160fc6f239-mon-vm06, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-06T22:29:03.838 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:03 vm06.local podman[78368]: 2026-03-06 22:29:03.780198521 +0100 CET m=+0.100598478 container remove d717d212d7fb4c392fede99894a4e2a8cfe5647fff71bd5fb51eaeb6f74b09a4 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-c76e688a-19a2-11f1-bdea-01160fc6f239-mon-vm06, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-06T22:29:03.838 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Mar 06 22:29:03 vm06.local bash[78368]: ceph-c76e688a-19a2-11f1-bdea-01160fc6f239-mon-vm06 2026-03-06T22:29:03.846 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-c76e688a-19a2-11f1-bdea-01160fc6f239@mon.vm06.service' 2026-03-06T22:29:03.879 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-06T22:29:03.879 INFO:tasks.cephadm.mon.vm06:Stopped mon.vm06 2026-03-06T22:29:03.879 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 --force --keep-logs 2026-03-06T22:29:04.181 INFO:teuthology.orchestra.run.vm01.stdout:Deleting cluster with fsid: c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:29:44.250 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 --force --keep-logs 2026-03-06T22:29:44.554 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:30:17.951 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-06T22:30:17.976 DEBUG:teuthology.orchestra.run.vm06:> sudo 
rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-06T22:30:18.002 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-06T22:30:18.002 DEBUG:teuthology.misc:Transferring archived files from vm01:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/crash to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/386/remote/vm01/crash 2026-03-06T22:30:18.002 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/crash -- . 2026-03-06T22:30:18.039 INFO:teuthology.orchestra.run.vm01.stderr:tar: /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/crash: Cannot open: No such file or directory 2026-03-06T22:30:18.039 INFO:teuthology.orchestra.run.vm01.stderr:tar: Error is not recoverable: exiting now 2026-03-06T22:30:18.040 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/crash to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/386/remote/vm06/crash 2026-03-06T22:30:18.040 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/crash -- . 2026-03-06T22:30:18.067 INFO:teuthology.orchestra.run.vm06.stderr:tar: /var/lib/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/crash: Cannot open: No such file or directory 2026-03-06T22:30:18.067 INFO:teuthology.orchestra.run.vm06.stderr:tar: Error is not recoverable: exiting now 2026-03-06T22:30:18.068 INFO:tasks.cephadm:Checking cluster log for badness... 2026-03-06T22:30:18.068 DEBUG:teuthology.orchestra.run.vm01:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1 2026-03-06T22:30:18.107 INFO:tasks.cephadm:Compressing logs... 
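'Compressing logs...' fans out one gzip per log file on each host. The NUL separators (-print0 into xargs -0) keep the pipeline safe for arbitrary file names, and --max-procs=0 removes the concurrency cap, which is why the gzip --verbose stderr lines from parallel workers interleave mid-sentence in the output that follows. The pipeline it runs next, wrapped for readability:

    # One gzip per file, unbounded parallelism; worker stderr interleaves.
    time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 \
      | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty \
          -- gzip -5 --verbose --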
2026-03-06T22:30:18.107 DEBUG:teuthology.orchestra.run.vm01:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-06T22:30:18.149 DEBUG:teuthology.orchestra.run.vm06:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-06T22:30:18.172 INFO:teuthology.orchestra.run.vm06.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-06T22:30:18.172 INFO:teuthology.orchestra.run.vm06.stderr:‘/var/log/rbd-target-api’: No such file or directory 2026-03-06T22:30:18.173 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-volume.log 2026-03-06T22:30:18.174 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-06T22:30:18.174 INFO:teuthology.orchestra.run.vm01.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-06T22:30:18.174 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/cephadm.log: 92.0% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-06T22:30:18.175 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-client.ceph-exporter.vm06.log 2026-03-06T22:30:18.175 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mgr.vm06.awlziz.log 2026-03-06T22:30:18.175 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-volume.log: /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-client.ceph-exporter.vm06.log: 30.4% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-client.ceph-exporter.vm06.log.gz 2026-03-06T22:30:18.175 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mon.vm01.log 2026-03-06T22:30:18.176 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.log 2026-03-06T22:30:18.176 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mon.vm06.log 2026-03-06T22:30:18.178 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mgr.vm06.awlziz.log: 91.1% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mgr.vm06.awlziz.log.gz 2026-03-06T22:30:18.179 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.audit.log 2026-03-06T22:30:18.180 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mon.vm01.log: gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.audit.log 2026-03-06T22:30:18.182 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.log: 84.9% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.log.gz 2026-03-06T22:30:18.186 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mgr.vm01.mrlynj.log 2026-03-06T22:30:18.188 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.audit.log: 91.0% 91.5% -- replaced with 
/var/log/ceph/cephadm.log.gz 2026-03-06T22:30:18.188 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.cephadm.log 2026-03-06T22:30:18.188 INFO:teuthology.orchestra.run.vm01.stderr: -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.audit.log.gz 2026-03-06T22:30:18.189 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mon.vm06.log: gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.log 2026-03-06T22:30:18.190 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.audit.log: 91.2% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.audit.log.gz 2026-03-06T22:30:18.190 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.cephadm.log 2026-03-06T22:30:18.191 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.log: 84.4% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.log.gz 2026-03-06T22:30:18.191 INFO:teuthology.orchestra.run.vm06.stderr: 93.2% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-volume.log.gz 2026-03-06T22:30:18.192 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.0.log 2026-03-06T22:30:18.192 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.cephadm.log: 82.9% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.cephadm.log.gz 2026-03-06T22:30:18.192 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.3.log 2026-03-06T22:30:18.196 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mgr.vm01.mrlynj.log: gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-volume.log 2026-03-06T22:30:18.197 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.cephadm.log: 83.5% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph.cephadm.log.gz 2026-03-06T22:30:18.200 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.0.log: gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.4.log 2026-03-06T22:30:18.200 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-client.ceph-exporter.vm01.log 2026-03-06T22:30:18.207 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.1.log 2026-03-06T22:30:18.208 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-client.ceph-exporter.vm01.log: 93.1% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-client.ceph-exporter.vm01.log.gz 2026-03-06T22:30:18.210 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.6.log 2026-03-06T22:30:18.215 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.2.log 
2026-03-06T22:30:18.220 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.4.log: gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mds.foofs.vm06.aeobze.log 2026-03-06T22:30:18.223 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.5.log 2026-03-06T22:30:18.224 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.6.log: /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mds.foofs.vm06.aeobze.log: 84.0% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mds.foofs.vm06.aeobze.log.gz 2026-03-06T22:30:18.227 INFO:teuthology.orchestra.run.vm06.stderr: 92.1% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mon.vm06.log.gz 2026-03-06T22:30:18.232 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.2.log: 93.1% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-volume.log.gz 2026-03-06T22:30:18.234 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.7.log 2026-03-06T22:30:18.244 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mds.foofs.vm01.aitcjt.log 2026-03-06T22:30:18.256 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.7.log: /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mds.foofs.vm01.aitcjt.log: 71.8% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mds.foofs.vm01.aitcjt.log.gz 2026-03-06T22:30:18.316 INFO:teuthology.orchestra.run.vm06.stderr: 93.5% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.0.log.gz 2026-03-06T22:30:18.326 INFO:teuthology.orchestra.run.vm06.stderr: 93.5% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.4.log.gz 2026-03-06T22:30:18.331 INFO:teuthology.orchestra.run.vm06.stderr: 93.4% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.6.log.gz 2026-03-06T22:30:18.335 INFO:teuthology.orchestra.run.vm06.stderr: 93.4% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.3.log.gz 2026-03-06T22:30:18.336 INFO:teuthology.orchestra.run.vm06.stderr: 2026-03-06T22:30:18.337 INFO:teuthology.orchestra.run.vm06.stderr:real 0m0.174s 2026-03-06T22:30:18.337 INFO:teuthology.orchestra.run.vm06.stderr:user 0m0.302s 2026-03-06T22:30:18.337 INFO:teuthology.orchestra.run.vm06.stderr:sys 0m0.029s 2026-03-06T22:30:18.358 INFO:teuthology.orchestra.run.vm01.stderr: 89.1% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mgr.vm01.mrlynj.log.gz 2026-03-06T22:30:18.363 INFO:teuthology.orchestra.run.vm01.stderr: 93.2% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.7.log.gz 2026-03-06T22:30:18.365 INFO:teuthology.orchestra.run.vm01.stderr: 93.5% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.2.log.gz 2026-03-06T22:30:18.371 INFO:teuthology.orchestra.run.vm01.stderr: 93.8% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.1.log.gz 2026-03-06T22:30:18.386 INFO:teuthology.orchestra.run.vm01.stderr: 93.5% -- replaced 
with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-osd.5.log.gz 2026-03-06T22:30:18.424 INFO:teuthology.orchestra.run.vm01.stderr: 91.3% -- replaced with /var/log/ceph/c76e688a-19a2-11f1-bdea-01160fc6f239/ceph-mon.vm01.log.gz 2026-03-06T22:30:18.426 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-06T22:30:18.426 INFO:teuthology.orchestra.run.vm01.stderr:real 0m0.262s 2026-03-06T22:30:18.426 INFO:teuthology.orchestra.run.vm01.stderr:user 0m0.438s 2026-03-06T22:30:18.426 INFO:teuthology.orchestra.run.vm01.stderr:sys 0m0.037s 2026-03-06T22:30:18.426 INFO:tasks.cephadm:Archiving logs... 2026-03-06T22:30:18.426 DEBUG:teuthology.misc:Transferring archived files from vm01:/var/log/ceph to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/386/remote/vm01/log 2026-03-06T22:30:18.426 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-06T22:30:18.520 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/log/ceph to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/386/remote/vm06/log 2026-03-06T22:30:18.520 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-06T22:30:18.565 INFO:tasks.cephadm:Removing cluster... 2026-03-06T22:30:18.565 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 --force 2026-03-06T22:30:18.853 INFO:teuthology.orchestra.run.vm01.stdout:Deleting cluster with fsid: c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:30:18.948 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid c76e688a-19a2-11f1-bdea-01160fc6f239 --force 2026-03-06T22:30:19.244 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: c76e688a-19a2-11f1-bdea-01160fc6f239 2026-03-06T22:30:19.342 INFO:tasks.cephadm:Removing cephadm ... 2026-03-06T22:30:19.343 DEBUG:teuthology.orchestra.run.vm01:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-06T22:30:19.361 DEBUG:teuthology.orchestra.run.vm06:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-06T22:30:19.376 INFO:tasks.cephadm:Teardown complete 2026-03-06T22:30:19.376 DEBUG:teuthology.run_tasks:Unwinding manager clock 2026-03-06T22:30:19.379 INFO:teuthology.task.clock:Checking final clock skew... 
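The final clock-skew check issues the same probe on both hosts: try ntpq first, fall back to chronyc on chrony-only systems (these CentOS 9 nodes have no ntpq, hence the 'command not found' lines that follow), and end with '|| true' so a missing time daemon cannot fail the teardown:

    PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true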
2026-03-06T22:30:19.379 DEBUG:teuthology.orchestra.run.vm01:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-06T22:30:19.403 DEBUG:teuthology.orchestra.run.vm06:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-06T22:30:19.418 INFO:teuthology.orchestra.run.vm01.stderr:bash: line 1: ntpq: command not found 2026-03-06T22:30:19.431 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found 2026-03-06T22:30:19.514 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-06T22:30:19.514 INFO:teuthology.orchestra.run.vm06.stdout:=============================================================================== 2026-03-06T22:30:19.514 INFO:teuthology.orchestra.run.vm06.stdout:^+ mail.sassmann.nrw 2 6 377 60 -1480us[-1513us] +/- 43ms 2026-03-06T22:30:19.514 INFO:teuthology.orchestra.run.vm06.stdout:^+ sv5.ggsrv.de 2 6 77 57 +4414us[+4414us] +/- 25ms 2026-03-06T22:30:19.514 INFO:teuthology.orchestra.run.vm06.stdout:^* ntp01.pingless.com 2 6 377 58 -664us[ -698us] +/- 14ms 2026-03-06T22:30:19.514 INFO:teuthology.orchestra.run.vm06.stdout:^+ meinekiste.de 2 6 377 60 -1417us[-1450us] +/- 17ms 2026-03-06T22:30:19.515 INFO:teuthology.orchestra.run.vm01.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-06T22:30:19.515 INFO:teuthology.orchestra.run.vm01.stdout:=============================================================================== 2026-03-06T22:30:19.515 INFO:teuthology.orchestra.run.vm01.stdout:^* ntp01.pingless.com 2 6 377 58 -697us[ -720us] +/- 14ms 2026-03-06T22:30:19.515 INFO:teuthology.orchestra.run.vm01.stdout:^+ sv5.ggsrv.de 2 6 277 55 +4397us[+4397us] +/- 25ms 2026-03-06T22:30:19.515 INFO:teuthology.orchestra.run.vm01.stdout:^+ meinekiste.de 2 6 377 59 -1615us[-1637us] +/- 17ms 2026-03-06T22:30:19.515 INFO:teuthology.orchestra.run.vm01.stdout:^+ mail.sassmann.nrw 2 6 377 60 -1478us[-1500us] +/- 43ms 2026-03-06T22:30:19.515 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab 2026-03-06T22:30:19.517 INFO:teuthology.task.ansible:Skipping ansible cleanup... 2026-03-06T22:30:19.518 DEBUG:teuthology.run_tasks:Unwinding manager selinux 2026-03-06T22:30:19.520 DEBUG:teuthology.run_tasks:Unwinding manager pcp 2026-03-06T22:30:19.522 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer 2026-03-06T22:30:19.524 INFO:teuthology.task.internal:Duration was 484.336020 seconds 2026-03-06T22:30:19.524 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog 2026-03-06T22:30:19.527 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring... 2026-03-06T22:30:19.527 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-06T22:30:19.557 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-06T22:30:19.594 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-06T22:30:19.597 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-06T22:30:19.931 INFO:teuthology.task.internal.syslog:Checking logs for errors... 
2026-03-06T22:30:19.515 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-06T22:30:19.517 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-06T22:30:19.518 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-06T22:30:19.520 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-06T22:30:19.522 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-06T22:30:19.524 INFO:teuthology.task.internal:Duration was 484.336020 seconds
2026-03-06T22:30:19.524 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-06T22:30:19.527 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-06T22:30:19.527 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-06T22:30:19.557 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-06T22:30:19.594 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-06T22:30:19.597 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-06T22:30:19.931 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-03-06T22:30:19.931 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm01.local
2026-03-06T22:30:19.932 DEBUG:teuthology.orchestra.run.vm01:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-06T22:30:19.954 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm06.local
2026-03-06T22:30:19.955 DEBUG:teuthology.orchestra.run.vm06:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-06T22:30:19.997 INFO:teuthology.task.internal.syslog:Gathering journalctl...
2026-03-06T22:30:19.997 DEBUG:teuthology.orchestra.run.vm01:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-06T22:30:19.999 DEBUG:teuthology.orchestra.run.vm06:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
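The kern.log scan above is one long grep pipeline: flag any line containing BUG, INFO, or DEADLOCK, strip a list of known-benign matches, and keep only the first survivor (an empty result means the check passes). A condensed Python sketch of that allowlist scan; the IGNORE list here is a small illustrative subset of the patterns in the actual command:

    import re

    FLAG = re.compile(r"\bBUG\b|\bINFO\b|\bDEADLOCK\b")
    IGNORE = [
        re.compile(r"task .* blocked for more than .* seconds"),
        re.compile(r"lockdep is turned off"),
        re.compile(r"ceph-create-keys: INFO"),
    ]

    def first_flagged_line(path: str):
        # errors="replace" stands in for grep's --binary-files=text.
        with open(path, errors="replace") as f:
            for line in f:
                if FLAG.search(line) and not any(p.search(line) for p in IGNORE):
                    return line  # mirrors "| head -n 1": first hit wins
        return None  # no hit: the host's kernel log is clean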
2026-03-06T22:30:20.406 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-06T22:30:20.407 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-06T22:30:20.408 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-06T22:30:20.428 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-06T22:30:20.429 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-06T22:30:20.429 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-06T22:30:20.429 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-06T22:30:20.429 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-06T22:30:20.430 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-06T22:30:20.430 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-06T22:30:20.430 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-06T22:30:20.430 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-06T22:30:20.431 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-06T22:30:20.530 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.2% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-06T22:30:20.545 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.8% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
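The compression step above fans out one gzip per *.log file (xargs --max-args=1 with --max-procs=0, i.e. as many processes as files), which is why the verbose messages from the parallel gzip processes interleave in the captured stderr. A rough Python equivalent; the function name is illustrative, and a thread pool driving one subprocess per file stands in for xargs' unbounded fan-out:

    import subprocess
    from concurrent.futures import ThreadPoolExecutor
    from pathlib import Path

    def compress_logs(root: str) -> None:
        logs = [str(p) for p in Path(root).rglob("*.log")]
        if not logs:
            return  # --no-run-if-empty: nothing to do, run nothing
        # One gzip subprocess per file, all started concurrently.
        with ThreadPoolExecutor(max_workers=len(logs)) as pool:
            list(pool.map(
                lambda p: subprocess.run(["gzip", "-5", "--verbose", "--", p]),
                logs,
            ))

    compress_logs("/home/ubuntu/cephtest/archive/syslog")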
2026-03-06T22:30:20.547 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-06T22:30:20.549 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-06T22:30:20.550 DEBUG:teuthology.orchestra.run.vm01:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-06T22:30:20.610 DEBUG:teuthology.orchestra.run.vm06:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-06T22:30:20.634 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-06T22:30:20.637 DEBUG:teuthology.orchestra.run.vm01:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-06T22:30:20.653 DEBUG:teuthology.orchestra.run.vm06:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-06T22:30:20.680 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = core
2026-03-06T22:30:20.701 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = core
2026-03-06T22:30:20.713 DEBUG:teuthology.orchestra.run.vm01:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-06T22:30:20.748 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T22:30:20.748 DEBUG:teuthology.orchestra.run.vm06:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-06T22:30:20.768 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T22:30:20.768 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-06T22:30:20.771 INFO:teuthology.task.internal:Transferring archived files...
2026-03-06T22:30:20.771 DEBUG:teuthology.misc:Transferring archived files from vm01:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/386/remote/vm01
2026-03-06T22:30:20.771 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-06T22:30:20.817 DEBUG:teuthology.misc:Transferring archived files from vm06:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/386/remote/vm06
2026-03-06T22:30:20.817 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-06T22:30:20.844 INFO:teuthology.task.internal:Removing archive directory...
2026-03-06T22:30:20.844 DEBUG:teuthology.orchestra.run.vm01:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-06T22:30:20.859 DEBUG:teuthology.orchestra.run.vm06:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-06T22:30:20.897 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-06T22:30:20.900 INFO:teuthology.task.internal:Not uploading archives.
2026-03-06T22:30:20.900 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
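The internal.coredump unwind above resets kernel.core_pattern, deletes any cores attributed to systemd-sysusers (a known noise source), removes the coredump directory only if it is then empty ("rmdir --ignore-fail-on-non-empty"), and finally probes it with "test -e"; the exit status 1 on both hosts means the directory is gone, so no real cores survived. A sketch of that sweep, with an illustrative function name:

    import os
    import subprocess

    def sweep_coredumps(d: str) -> bool:
        if os.path.isdir(d):
            for name in os.listdir(d):
                path = os.path.join(d, name)
                kind = subprocess.run(["file", path],
                                      capture_output=True, text=True).stdout
                if "systemd-sysusers" in kind:
                    os.unlink(path)  # known-noise core, discard
            try:
                os.rmdir(d)  # like rmdir --ignore-fail-on-non-empty
            except OSError:
                pass  # non-empty: real cores remain for archiving
        return os.path.exists(d)  # True = cores survived ("test -e" exits 0)

    assert not sweep_coredumps("/home/ubuntu/cephtest/archive/coredump")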
2026-03-06T22:30:20.903 INFO:teuthology.task.internal:Tidying up after the test...
2026-03-06T22:30:20.903 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-06T22:30:20.917 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-06T22:30:20.931 INFO:teuthology.orchestra.run.vm01.stdout:  8532144    0 drwxr-xr-x   2 ubuntu   ubuntu          6 Mar  6 22:30 /home/ubuntu/cephtest
2026-03-06T22:30:20.953 INFO:teuthology.orchestra.run.vm06.stdout:  8532143    0 drwxr-xr-x   2 ubuntu   ubuntu          6 Mar  6 22:30 /home/ubuntu/cephtest
2026-03-06T22:30:20.953 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-06T22:30:20.959 INFO:teuthology.run:Summary data:
  description: orch:cephadm:smoke-roleless/{0-distro/centos_9.stream 1-start 2-services/nfs-ingress2 3-final}
  duration: 484.3360197544098
  owner: irq0
  success: true
2026-03-06T22:30:20.959 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-06T22:30:20.976 INFO:teuthology.run:pass
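The tidy-up just before the summary is a leak check: "find -ls" lists whatever is still under the test directory, and the following "rmdir" succeeds only if the directory is empty, so a single leaked file would have failed the run at this last step. A minimal sketch of the same check; the path is taken from the log, the function name is illustrative:

    import os

    def tidy_up(testdir: str) -> None:
        # Print whatever is left (the "find -ls" half of the command)...
        for root, dirs, files in os.walk(testdir):
            for name in dirs + files:
                print(os.path.join(root, name))
        # ...then rmdir: raises OSError if anything was left behind.
        os.rmdir(testdir)

    tidy_up("/home/ubuntu/cephtest")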